diff --git a/acuity-metadata.json b/acuity-metadata.json
new file mode 100644
index 00000000000..4d652a51092
--- /dev/null
+++ b/acuity-metadata.json
@@ -0,0 +1,2630 @@
+[
+ {
+ "name": "a_times_b_plus_c",
+ "attributes": [],
+ "inputs": [
+ { "name": "A" },
+ { "name": "B" },
+ { "name": "C" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "abs",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "add",
+ "attributes": [],
+ "inputs": [
+ { "name": "A" },
+ { "name": "B" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "C" }
+ ]
+ },
+ {
+ "name": "addn",
+ "attributes": [],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "argmin",
+ "attributes": [
+ { "name": "axis", "default": -1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "base_input_layer",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "batch2space",
+ "attributes": [
+ { "name": "block_shape", "default": [ 2, 2 ] },
+ { "name": "block_crops", "default": [ [ 0, 0 ], [ 0, 0 ] ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "batchnorm_single",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "eps", "default": 0.0001 }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "mean" },
+ { "name": "variance" }
+ ],
+ "constants": [
+ { "name": "bias" },
+ { "name": "scale" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "batchnormalize",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "eps", "default": 0.0001 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "beta" },
+ { "name": "gamma" },
+ { "name": "mean" },
+ { "name": "variance" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "capsule_norm",
+ "category": "Normalization",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "cast",
+ "attributes": [
+ { "name": "in_data_type", "default": 0 },
+ { "name": "out_data_type", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "clipbyvalue",
+ "attributes": [
+ { "name": "clip_value_min", "default": 0 },
+ { "name": "clip_value_max", "default": 255 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "concat",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "dim", "default": 1 }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "out" }
+ ]
+ },
+ {
+ "name": "concatshift",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "dim", "default": 1 },
+ { "name": "keep_size", "default": 1 }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "data" },
+ { "name": "shifted_data" }
+ ]
+ },
+ {
+ "name": "continuationindicator",
+ "attributes": [
+ { "name": "time_step", "default": 0 },
+ { "name": "batch_size", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "conv1d",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "padding", "default": "VALID" },
+ { "name": "bias", "default": true },
+ { "name": "group_number", "default": 1 },
+ { "name": "ksize", "default": 1 },
+ { "name": "stride", "default": 1 },
+ { "name": "pad", "default": [ 0, 0 ] },
+ { "name": "dilation", "default": [ 1, 1, 1 ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "conv2d_op",
+ "category": "Layer",
+ "attributes": [
+ { "name": "padding", "default": "VALID" },
+ { "name": "group_number", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "pad_h", "default": 0 },
+ { "name": "pad_w", "default": 0 },
+ { "name": "dilation", "default": [ 1, 1, 1 ] },
+ { "name": "pad_method", "default": "auto" },
+ { "name": "pad", "default": [ 0, 0, 0, 0 ] },
+ { "name": "pad_h_b", "default": 0 },
+ { "name": "pad_w_r", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weight" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "conv3d",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "bias", "default": false },
+ { "name": "group_number", "default": 1 },
+ { "name": "ksize_d", "default": 1 },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_d", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "padding", "default": "VALID" },
+ { "name": "pad_method", "default": "padding_const" },
+ { "name": "pad", "default": [ 0, 0, 0, 0, 0, 0 ] },
+ { "name": "dilation", "default": [ 1, 1, 1, 1, 1 ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "convolution",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "padding", "default": "VALID" },
+ { "name": "bias", "default": true },
+ { "name": "group_number", "default": 1 },
+ { "name": "regularize", "default": false },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "pad_h", "default": 0 },
+ { "name": "pad_w", "default": 0 },
+ { "name": "dilation", "default": [ 1, 1, 1, 1 ] },
+ { "name": "pad_method", "default": "auto" },
+ { "name": "pad", "default": [ 0, 0, 0, 0 ] },
+ { "name": "pad_h_b", "default": 0 },
+ { "name": "pad_w_r", "default": 0 },
+ { "name": "multiplier", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "crop_image",
+ "attributes": [
+ { "name": "crop_size", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "cropandresize",
+ "category": "Layer",
+ "attributes": [
+ { "name": "num_crop_boxes", "default": 0 },
+ { "name": "crop_size", "default": [] },
+ { "name": "resize_method", "default": "bilinear" }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "boxes" },
+ { "name": "box_ind" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "ctc_loss_layer",
+ "attributes": [
+ { "name": "time_major", "default": false }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "customlayer",
+ "attributes": [],
+ "inputs": [],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "deconvolution",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "padding", "default": "VALID" },
+ { "name": "bias", "default": true },
+ { "name": "group_number", "default": 1 },
+ { "name": "regularize", "default": false },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "pad_h", "default": 0 },
+ { "name": "pad_w", "default": 0 },
+ { "name": "pad_method", "default": "auto" },
+ { "name": "pad", "default": [ 0, 0, 0, 0 ] },
+ { "name": "pad_h_b", "default": 0 },
+ { "name": "pad_w_r", "default": 0 },
+ { "name": "output_shape", "default": [] },
+ { "name": "output_padding_h", "default": 0 },
+ { "name": "output_padding_w", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "depth2space",
+ "attributes": [
+ { "name": "block_size", "default": 2 },
+ { "name": "mode", "default": "DCR" }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "depthwise_conv1d",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "padding", "default": "VALID" },
+ { "name": "bias", "default": true },
+ { "name": "group_number", "default": 2 },
+ { "name": "ksize", "default": 1 },
+ { "name": "stride", "default": 1 },
+ { "name": "pad", "default": [ 0, 0 ] },
+ { "name": "dilation", "default": [ 1, 1, 1 ] },
+ { "name": "multiplier", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "depthwise_conv2d_op",
+ "category": "Layer",
+ "attributes": [
+ { "name": "padding", "default": "VALID" },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "dilation", "default": [ 1, 1, 1, 1 ] },
+ { "name": "multiplier", "default": 1 },
+ { "name": "pad_method", "default": "auto" },
+ { "name": "pad", "default": [ 0, 0, 0, 0 ] },
+ { "name": "pad_h", "default": 0 },
+ { "name": "pad_w", "default": 0 },
+ { "name": "pad_h_b", "default": 0 },
+ { "name": "pad_w_r", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weight" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "depthwise_convolution",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "padding", "default": "VALID" },
+ { "name": "bias", "default": true },
+ { "name": "regularize", "default": false },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "dilation", "default": [ 1, 1, 1, 1 ] },
+ { "name": "multiplier", "default": 1 },
+ { "name": "pad_method", "default": "auto" },
+ { "name": "pad", "default": [ 0, 0, 0, 0 ] },
+ { "name": "pad_h", "default": 0 },
+ { "name": "pad_w", "default": 0 },
+ { "name": "pad_h_b", "default": 0 },
+ { "name": "pad_w_r", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "dequantize",
+ "category": "Layer",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "detectionevaluate",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "out0" },
+ { "name": "out1" },
+ { "name": "out2" },
+ { "name": "out3" },
+ { "name": "out4" }
+ ]
+ },
+ {
+ "name": "detectionoutput",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" },
+ { "name": "in2" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "digit_capsule",
+ "attributes": [
+ { "name": "num_output", "default": 1 },
+ { "name": "vec_len", "default": 1 },
+ { "name": "iterations", "default": 3 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "divide",
+ "attributes": [],
+ "inputs": [
+ { "name": "dividend" },
+ { "name": "divisor" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "dropout",
+ "category": "Dropout",
+ "attributes": [
+ { "name": "ratio", "default": 0.5 },
+ { "name": "scale_train", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "dtype_converter",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "eltwise",
+ "attributes": [
+ { "name": "operation", "default": "SUM" },
+ { "name": "coeff", "default": "" }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "elu",
+ "category": "Activation",
+ "attributes": [
+ { "name": "alpha", "default": 0.1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "embedding_lookup",
+ "category": "Embedding",
+ "attributes": [
+ { "name": "partition_strategy", "default": "mod" }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "embedding_params" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "equal",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "exp",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "expand_broadcast",
+ "attributes": [
+ { "name": "shape", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "expanddims",
+ "attributes": [
+ { "name": "dim", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "flatten",
+ "attributes": [
+ { "name": "axis", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "floor",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "floor_div",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "fullconnect",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "bias", "default": true },
+ { "name": "regularize", "default": false },
+ { "name": "axis", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "fullconnect_op",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "bias", "default": true },
+ { "name": "axis", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weights" }
+ ],
+ "constants": [
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "gather",
+ "category": "Transform",
+ "attributes": [
+ { "name": "axis", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "gathernd",
+ "category": "Layer",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "generator_input_layer",
+ "category": "Data",
+ "attributes": [
+ { "name": "database", "default": "" },
+ { "name": "shapes", "default": [] },
+ { "name": "sparse_tensors", "default": [] },
+ { "name": "data_types", "default": [] }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "greater",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "greater_equal",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "group_conv1d",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "padding", "default": "VALID" },
+ { "name": "bias", "default": true },
+ { "name": "group_number", "default": 2 },
+ { "name": "ksize", "default": 1 },
+ { "name": "stride", "default": 1 },
+ { "name": "pad", "default": [ 0, 0 ] },
+ { "name": "dilation", "default": [ 1, 1, 1 ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "gru",
+ "category": "Layer",
+ "attributes": [
+ { "name": "num_units", "default": 1 },
+ { "name": "time_major", "default": true },
+ { "name": "bias", "default": true },
+ { "name": "activation", "default": "tanh" },
+ { "name": "recurrent_activation", "default": "sigmoid" },
+ { "name": "return_sequences", "default": true },
+ { "name": "direction", "default": "forward" },
+ { "name": "linear_before_reset", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "in_hstat" }
+ ],
+ "constants": [
+ { "name": "gates_kernel" },
+ { "name": "gates_bias" },
+ { "name": "candidate_kernel" },
+ { "name": "candidate_bias" }
+ ],
+ "outputs": [
+ { "name": "output" },
+ { "name": "out_hstat" }
+ ]
+ },
+ {
+ "name": "gru_cell",
+ "category": "Layer",
+ "attributes": [
+ { "name": "num_units", "default": 1 },
+ { "name": "activation", "default": "tanh" },
+ { "name": "recurrent_activation", "default": "sigmoid" },
+ { "name": "linear_before_reset", "default": 0 },
+ { "name": "cudnn_implementation", "default": false }
+ ],
+ "inputs": [
+ { "name": "data" },
+ { "name": "hstat" },
+ { "name": "cond_reset" },
+ { "name": "cond_update" },
+ { "name": "cond_candidate" }
+ ],
+ "constants": [
+ { "name": "gates_kernel" },
+ { "name": "gates_bias" },
+ { "name": "candidate_kernel" },
+ { "name": "candidate_bias" }
+ ],
+ "outputs": [
+ { "name": "data" },
+ { "name": "hstat" }
+ ]
+ },
+ {
+ "name": "gru_keras",
+ "category": "Layer",
+ "attributes": [
+ { "name": "units", "default": 1 },
+ { "name": "activation", "default": "tanh" },
+ { "name": "recurrent_activation", "default": "hard_sigmoid" },
+ { "name": "use_bias", "default": true },
+ { "name": "return_sequences", "default": false },
+ { "name": "return_state", "default": false },
+ { "name": "go_backwards", "default": false },
+ { "name": "stateful", "default": false },
+ { "name": "reset_after", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "kernel" },
+ { "name": "recurrent_kernel" },
+ { "name": "bias" }
+ ],
+ "outputs": []
+ },
+ {
+ "name": "h5_input_layer",
+ "attributes": [
+ { "name": "database", "default": "" },
+ { "name": "shapes", "default": [] },
+ { "name": "sparse_tensors", "default": [] },
+ { "name": "data_types", "default": [] }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "hard_swish",
+ "category": "Activation",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "image_resize",
+ "category": "Layer",
+ "attributes": [
+ { "name": "type", "default": "bilinear" },
+ { "name": "new_size", "default": [] },
+ { "name": "align_corners", "default": false },
+ { "name": "half_pixel", "default": false },
+ { "name": "size_factors", "default": null }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "image_transform",
+ "category": "Layer",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "input",
+ "category": "Data",
+ "attributes": [
+ { "name": "size", "default": "" },
+ { "name": "channels", "default": 1 },
+ { "name": "shape", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "instancenormalize",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "eps", "default": 0.0001 },
+ { "name": "axis", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "bias" },
+ { "name": "scale" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "keras_rnn_lstm",
+ "category": "Layer",
+ "attributes": [
+ { "name": "cell", "default": null },
+ { "name": "go_backwards", "default": false },
+ { "name": "return_sequences", "default": false },
+ { "name": "return_state", "default": false },
+ { "name": "stateful", "default": false },
+ { "name": "unroll", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "kernel" },
+ { "name": "recurrent_kernel" },
+ { "name": "bias" }
+ ],
+ "outputs": []
+ },
+ {
+ "name": "l2normalize",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "l2n_dim", "default": null },
+ { "name": "eps", "default": 1e-12 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "l2normalizescale",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "l2n_dim", "default": null },
+ { "name": "eps", "default": 1e-12 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "scale" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "l2pooling",
+ "category": "Pool",
+ "attributes": [
+ { "name": "padding", "default": "VALID" },
+ { "name": "type", "default": "MAX" },
+ { "name": "global_pooling", "default": false },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "pad_h", "default": 0 },
+ { "name": "pad_w", "default": 0 },
+ { "name": "round_type", "default": "ceil" },
+ { "name": "pad_method", "default": "auto" },
+ { "name": "pad", "default": [ 0, 0, 0, 0 ] },
+ { "name": "pad_h_b", "default": 0 },
+ { "name": "pad_w_r", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "layernormalize",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "eps", "default": 0.0001 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "bias" },
+ { "name": "scale" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "leakyrelu",
+ "category": "Activation",
+ "attributes": [
+ { "name": "leaky_ratio", "default": 0.1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "less",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "less_equal",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "lmdb_input_layer",
+ "attributes": [
+ { "name": "database", "default": "" },
+ { "name": "shapes", "default": [] },
+ { "name": "sparse_tensors", "default": [] },
+ { "name": "data_types", "default": [] }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "out0" },
+ { "name": "out1" }
+ ]
+ },
+ {
+ "name": "localresponsenormalization",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "local_size", "default": 1 },
+ { "name": "bias", "default": 2 },
+ { "name": "alpha", "default": 0.0001 },
+ { "name": "beta", "default": 0.75 },
+ { "name": "type", "default": "NORM_ACROSS_CHANNELS" }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "localresponsenormalization_tf",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "local_size", "default": 1 },
+ { "name": "bias", "default": 2 },
+ { "name": "alpha", "default": 0.0001 },
+ { "name": "beta", "default": 0.75 },
+ { "name": "type", "default": "NORM_ACROSS_CHANNELS" }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "log",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "log_softmax",
+ "category": "Activation",
+ "attributes": [
+ { "name": "sf_axis", "default": -1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "logical_and",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "logical_or",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "lstm",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "time_major", "default": true },
+ { "name": "forget_bias", "default": 1 },
+ { "name": "activation", "default": "tanh" },
+ { "name": "use_cifg", "default": false },
+ { "name": "use_peepholes", "default": false },
+ { "name": "num_proj", "default": null },
+ { "name": "cell_clip", "default": 0 },
+ { "name": "proj_clip", "default": 0 },
+ { "name": "recurrent_activation", "default": "sigmoid" },
+ { "name": "return_sequences", "default": true }
+ ],
+ "inputs": [
+ { "name": "data" },
+ { "name": "hstat" },
+ { "name": "cstat" }
+ ],
+ "constants": [
+ { "name": "lstm_cell_kernel" },
+ { "name": "lstm_cell_bias" },
+ { "name": "weight_proj" },
+ { "name": "bias_proj" }
+ ],
+ "outputs": [
+ { "name": "data" },
+ { "name": "hstat" },
+ { "name": "cstat" }
+ ]
+ },
+ {
+ "name": "lstm_keras",
+ "category": "Layer",
+ "attributes": [
+ { "name": "units", "default": 1 },
+ { "name": "activation", "default": "tanh" },
+ { "name": "recurrent_activation", "default": "hard_sigmoid" },
+ { "name": "use_bias", "default": true },
+ { "name": "return_sequences", "default": false },
+ { "name": "return_state", "default": false },
+ { "name": "go_backwards", "default": false },
+ { "name": "stateful", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "kernel" },
+ { "name": "recurrent_kernel" },
+ { "name": "bias" }
+ ],
+ "outputs": []
+ },
+ {
+ "name": "lstmunit",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weights", "default": 1 },
+ { "name": "num_proj", "default": null },
+ { "name": "forget_bias", "default": 1 },
+ { "name": "cell_clip", "default": 0 },
+ { "name": "proj_clip", "default": 0 },
+ { "name": "activation", "default": "tanh" },
+ { "name": "use_layer_norm_lstm", "default": false },
+ { "name": "use_cifg", "default": false }
+ ],
+ "inputs": [
+ { "name": "data" },
+ { "name": "hstat" },
+ { "name": "cstat" }
+ ],
+ "constants": [
+ { "name": "wi" },
+ { "name": "wf" },
+ { "name": "wc" },
+ { "name": "wo" },
+ { "name": "hi" },
+ { "name": "hf" },
+ { "name": "hc" },
+ { "name": "ho" },
+ { "name": "bi" },
+ { "name": "bf" },
+ { "name": "bc" },
+ { "name": "bo" },
+ { "name": "wp" },
+ { "name": "bp" },
+ { "name": "ln_i" },
+ { "name": "ln_f" },
+ { "name": "ln_c" },
+ { "name": "ln_o" }
+ ],
+ "outputs": [
+ { "name": "data" },
+ { "name": "hstat" },
+ { "name": "cstat" }
+ ]
+ },
+ {
+ "name": "margin_loss_layer",
+ "attributes": [
+ { "name": "margin", "default": 0.4 },
+ { "name": "downweight", "default": 0.5 }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "mat_inverse",
+ "attributes": [
+ { "name": "adjoint", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "matmul",
+ "attributes": [
+ { "name": "transpose_a", "default": false },
+ { "name": "transpose_b", "default": false }
+ ],
+ "inputs": [
+ { "name": "A" },
+ { "name": "B" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "minimum",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "minimum_with_clip",
+ "attributes": [
+ { "name": "clip", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "mish",
+ "category": "Activation",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "moments",
+ "attributes": [
+ { "name": "axis_list", "default": [] },
+ { "name": "keep_dims", "default": true }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "mean" },
+ { "name": "variance" }
+ ]
+ },
+ {
+ "name": "multiply",
+ "attributes": [
+ { "name": "axis", "default": 1 },
+ { "name": "bias", "default": true }
+ ],
+ "inputs": [],
+ "constants": [
+ { "name": "scale" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "nce_loss",
+ "attributes": [
+ { "name": "num_sampled", "default": 1 },
+ { "name": "num_classes", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [
+ { "name": "weight" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "neg",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "noop",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "noop_multi_out",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "norm_with_channel_mean",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "mean", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "norm_with_min_max",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "min_value", "default": 0 },
+ { "name": "max_value", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "norm_with_scale",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "scale", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "not_equal",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "npy_input_layer",
+ "attributes": [
+ { "name": "database", "default": "" },
+ { "name": "shapes", "default": [] },
+ { "name": "sparse_tensors", "default": [] },
+ { "name": "data_types", "default": [] }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "output",
+ "category": "Data",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "pad",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "padding_value", "default": [] },
+ { "name": "padding_mode", "default": "CONSTANT" },
+ { "name": "padding_const", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "permute",
+ "category": "Shape",
+ "attributes": [
+ { "name": "perm", "default": [ 0 ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "pool3d",
+ "category": "Pool",
+ "attributes": [
+ { "name": "type", "default": "MAX" },
+ { "name": "global_pooling", "default": false },
+ { "name": "ksize_d", "default": 1 },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_d", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "round_type", "default": "ceil" },
+ { "name": "pad_method", "default": "padding_const" },
+ { "name": "pad", "default": [ 0, 0, 0, 0, 0, 0 ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "pooling",
+ "category": "Pool",
+ "attributes": [
+ { "name": "padding", "default": "VALID" },
+ { "name": "type", "default": "MAX" },
+ { "name": "global_pooling", "default": false },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "pad_h", "default": 0 },
+ { "name": "pad_w", "default": 0 },
+ { "name": "round_type", "default": "ceil" },
+ { "name": "pad_method", "default": "auto" },
+ { "name": "pad", "default": [ 0, 0, 0, 0 ] },
+ { "name": "pad_h_b", "default": 0 },
+ { "name": "pad_w_r", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "poolwithargmax",
+ "category": "Pool",
+ "attributes": [
+ { "name": "padding", "default": "VALID" },
+ { "name": "type", "default": "MAX" },
+ { "name": "global_pooling", "default": false },
+ { "name": "ksize_h", "default": 1 },
+ { "name": "ksize_w", "default": 1 },
+ { "name": "stride_h", "default": 1 },
+ { "name": "stride_w", "default": 1 },
+ { "name": "pad_h", "default": 0 },
+ { "name": "pad_w", "default": 0 },
+ { "name": "round_type", "default": "ceil" },
+ { "name": "pad_method", "default": "auto" },
+ { "name": "pad", "default": [ 0, 0, 0, 0 ] },
+ { "name": "pad_h_b", "default": 0 },
+ { "name": "pad_w_r", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "out0" },
+ { "name": "out1" }
+ ]
+ },
+ {
+ "name": "postprocess",
+ "attributes": [
+ { "name": "perm", "default": [ 0, 1, 2, 3 ] },
+ { "name": "dim_num", "default": 4 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "pow",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "prelu",
+ "category": "Activation",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "a" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "preprocess",
+ "attributes": [
+ { "name": "type", "default": "VSI_NN_OP_PRE_PROCESS_RGB" },
+ { "name": "left", "default": 0 },
+ { "name": "top", "default": 0 },
+ { "name": "width", "default": 244 },
+ { "name": "height", "default": 224 },
+ { "name": "mean", "default": [ 0, 0, 0 ] },
+ { "name": "scale", "default": 1 },
+ { "name": "perm", "default": [ 0, 1, 2, 3 ] },
+ { "name": "in_dim_num", "default": 4 },
+ { "name": "out_dim_num", "default": 4 },
+ { "name": "out_size", "default": [ 224, 224, 3, 1 ] },
+ { "name": "reverse_channel", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "primary_capsule",
+ "attributes": [
+ { "name": "num_output", "default": 1 },
+ { "name": "vec_len", "default": 1 },
+ { "name": "strides", "default": [ 1, 1 ] },
+ { "name": "ksize", "default": [ 1, 1 ] },
+ { "name": "padding", "default": "SAME" }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "weight" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "priorbox",
+ "attributes": [
+ { "name": "mini_size", "default": "" },
+ { "name": "max_size", "default": "" },
+ { "name": "aspect_ratio", "default": "" },
+ { "name": "flip", "default": "" },
+ { "name": "clip", "default": "" },
+ { "name": "variance", "default": "0.1" },
+ { "name": "image_size", "default": 0 },
+ { "name": "image_h", "default": 0 },
+ { "name": "image_w", "default": 0 },
+ { "name": "step", "default": 0 },
+ { "name": "step_h", "default": 0 },
+ { "name": "step_w", "default": 0 },
+ { "name": "offset", "default": 0.5 }
+ ],
+ "inputs": [
+ { "name": "data" },
+ { "name": "shape" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "proposal",
+ "attributes": [
+ { "name": "feat_stride", "default": 16 },
+ { "name": "anchor_scales", "default": "8 16 32" },
+ { "name": "anchor_ratios", "default": "0.5 1 2" },
+ { "name": "anchor_base_size", "default": 16 },
+ { "name": "pre_nms_top_n", "default": 6000 },
+ { "name": "post_nms_top_n", "default": 300 },
+ { "name": "nms_thresh", "default": 0.7 },
+ { "name": "min_size", "default": 16 },
+ { "name": "im_info", "default": "800 600 1 1" },
+ { "name": "has_bg", "default": true },
+ { "name": "dynamic", "default": false }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "out0" },
+ { "name": "out1" }
+ ]
+ },
+ {
+ "name": "quantize",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "real_div",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reconstruction_loss",
+ "attributes": [
+ { "name": "balance_factor", "default": 0.0005 }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "recurrent",
+ "category": "Layer",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reducemax",
+ "attributes": [
+ { "name": "axis_list", "default": null },
+ { "name": "keep_dims", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reducemean",
+ "attributes": [
+ { "name": "axis_list", "default": null },
+ { "name": "keep_dims", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reducemin",
+ "attributes": [
+ { "name": "axis_list", "default": null },
+ { "name": "keep_dims", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reducesum",
+ "attributes": [
+ { "name": "axis_list", "default": [] },
+ { "name": "keep_dims", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "region",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "relu",
+ "category": "Activation",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "relu_keras",
+ "category": "Activation",
+ "attributes": [
+ { "name": "alpha", "default": 0 },
+ { "name": "max_value", "default": "inf" },
+ { "name": "threshold", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "relun",
+ "category": "Activation",
+ "attributes": [
+ { "name": "relu_clamp_top", "default": "inf" },
+ { "name": "relu_clamp_bottom", "default": "0" }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reorg",
+ "attributes": [
+ { "name": "stride", "default": 2 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reshape",
+ "category": "Shape",
+ "attributes": [
+ { "name": "shape", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "resizebilinear_image",
+ "attributes": [
+ { "name": "new_size", "default": [] },
+ { "name": "align_corners", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "resizenearest_image",
+ "attributes": [
+ { "name": "new_size", "default": [] },
+ { "name": "align_corners", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reverse",
+ "attributes": [
+ { "name": "axis", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "reverse_sequence",
+ "attributes": [
+ { "name": "seq_axis", "default": 1 },
+ { "name": "batch_axis", "default": 2 }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "seq_lengths" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "roipooling",
+ "category": "Pool",
+ "attributes": [
+ { "name": "pooled_h", "default": 6 },
+ { "name": "pooled_w", "default": 6 },
+ { "name": "spatial_scale", "default": 0.0625 },
+ { "name": "sampling_ratio", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "route_train",
+ "attributes": [],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "rsqrt",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "scatternd",
+ "attributes": [
+ { "name": "shape", "default": [] }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "shuffle",
+ "attributes": [
+ { "name": "group_number", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "sigmoid",
+ "category": "Activation",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "signalframe",
+ "attributes": [
+ { "name": "frame_length", "default": 0 },
+ { "name": "frame_step", "default": 0 },
+ { "name": "pad_end", "default": false },
+ { "name": "pad_value", "default": 0 },
+ { "name": "axis", "default": -1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "simplernn_keras",
+ "category": "Layer",
+ "attributes": [
+ { "name": "units", "default": 1 },
+ { "name": "activation", "default": "tanh" },
+ { "name": "use_bias", "default": true },
+ { "name": "return_sequences", "default": false },
+ { "name": "return_state", "default": false },
+ { "name": "go_backwards", "default": false },
+ { "name": "stateful", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "kernel" },
+ { "name": "recurrent_kernel" },
+ { "name": "bias" }
+ ],
+ "outputs": []
+ },
+ {
+ "name": "sin",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "slice",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "begin", "default": [] },
+ { "name": "size", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "softmax",
+ "category": "Activation",
+ "attributes": [
+ { "name": "sf_axis", "default": -1 },
+ { "name": "beta", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "softmax_with_logits_loss_layer",
+ "category": "Activation",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "softrelu",
+ "category": "Activation",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "space2batch",
+ "attributes": [
+ { "name": "block_shape", "default": [ 2, 2 ] },
+ { "name": "block_paddings", "default": [ [ 0, 0 ], [ 0, 0 ] ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "space2depth",
+ "attributes": [
+ { "name": "block_size", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "split",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "dim", "default": 1 },
+ { "name": "slices", "default": "" },
+ { "name": "slices_tf", "default": "" },
+ { "name": "unstack", "default": false }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "sqlite_input_layer",
+ "attributes": [
+ { "name": "database", "default": "" },
+ { "name": "shapes", "default": [] },
+ { "name": "sparse_tensors", "default": [] },
+ { "name": "data_types", "default": [] }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "out0" },
+ { "name": "out1" },
+ { "name": "out2" }
+ ]
+ },
+ {
+ "name": "sqrt",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "square",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "squashing",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "squeeze",
+ "attributes": [
+ { "name": "axis_list", "default": null },
+ { "name": "name", "default": null }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "stack",
+ "attributes": [
+ { "name": "axis", "default": 0 }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "stack_concat",
+ "attributes": [
+ { "name": "shape", "default": [ 1, 32, 256 ] },
+ { "name": "axis", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" },
+ { "name": "in2" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "stridedslice",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "slice_begin_mask", "default": 0 },
+ { "name": "slice_end_mask", "default": 0 },
+ { "name": "slice_ellipsis_mask", "default": 0 },
+ { "name": "slice_new_axis_mask", "default": 0 },
+ { "name": "slice_shrink_axis_mask", "default": 0 },
+ { "name": "slice_begin", "default": [ 0, 0, 0, 0 ] },
+ { "name": "slice_end", "default": [ -1, -1, -1, -1 ] },
+ { "name": "slice_strides", "default": [ 1, 1, 1, 1 ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "subgraph",
+ "attributes": [
+ { "name": "sg_argv", "default": "" },
+ { "name": "sg_func", "default": "" },
+ { "name": "sg_out_shapes", "default": "" },
+ { "name": "sg_graph_buffer", "default": "" },
+ { "name": "sg_input_nodes", "default": "" },
+ { "name": "sg_output_nodes", "default": "" }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "subtract",
+ "attributes": [],
+ "inputs": [
+ { "name": "minuend" },
+ { "name": "subtrahend" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "svdf",
+ "attributes": [
+ { "name": "rank", "default": 1 },
+ { "name": "num_units", "default": 1 },
+ { "name": "spectrogram_length", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [
+ { "name": "weights_feature" },
+ { "name": "weights_time" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "swish",
+ "category": "Activation",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "tanh",
+ "category": "Activation",
+ "attributes": [
+ { "name": "hyperbolic_tan_scale_a", "default": 1 },
+ { "name": "hyperbolic_tan_scale_b", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "text_input_layer",
+ "attributes": [
+ { "name": "database", "default": "" },
+ { "name": "shapes", "default": [] },
+ { "name": "sparse_tensors", "default": [] },
+ { "name": "data_types", "default": [] }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "tile",
+ "attributes": [
+ { "name": "multiples", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "topk",
+ "attributes": [
+ { "name": "topk", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "topk_score",
+ "attributes": [
+ { "name": "topk", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "out0" },
+ { "name": "out1" }
+ ]
+ },
+ {
+ "name": "unstack",
+ "attributes": [
+ { "name": "axis", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": []
+ },
+ {
+ "name": "upsampling",
+ "category": "Layer",
+ "attributes": [
+ { "name": "factor", "default": 2 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "variable",
+ "category": "Data",
+ "attributes": [
+ { "name": "shape", "default": [ 1 ] }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [
+ { "name": "data" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "where",
+ "attributes": [],
+ "inputs": [
+ { "name": "in0" },
+ { "name": "in1" },
+ { "name": "in2" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "word2vec_input",
+ "attributes": [
+ { "name": "database", "default": "" },
+ { "name": "shapes", "default": [] },
+ { "name": "sparse_tensors", "default": [] },
+ { "name": "data_types", "default": [] },
+ { "name": "dictionary", "default": "" },
+ { "name": "model", "default": "skip-gram" },
+ { "name": "num_skips", "default": 2 },
+ { "name": "skip_window", "default": 1 }
+ ],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "out0" },
+ { "name": "out1" }
+ ]
+ },
+ {
+ "name": "yolo",
+ "attributes": [],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "yoloprocess",
+ "attributes": [],
+ "inputs": [],
+ "constants": [],
+ "outputs": [
+ { "name": "output" }
+ ]
+ }
+]
\ No newline at end of file
diff --git a/acuity.js b/acuity.js
new file mode 100644
index 00000000000..67b15d44e3e
--- /dev/null
+++ b/acuity.js
@@ -0,0 +1,551 @@
+
+const acuity = {};
+
+acuity.ModelFactory = class {
+
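+    // Registered for '.json' files whose top-level object carries both 'MetaData'
+    // and 'Layers'. Expected document shape, inferred from the parsing code below:
+    // { "MetaData": { "Name": ..., "AcuityVersion": ..., "Platform": ... },
+    //   "Layers": { "<layer>": { "op": ..., "inputs": [ ... ], "outputs": [ ... ], "parameters": { ... } } } }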
+ match(context) {
+ const extension = context.identifier.split('.').pop().toLowerCase();
+ if (extension === 'json') {
+ const obj = context.peek('json');
+ if (obj && obj.MetaData && obj.Layers) {
+ return obj;
+ }
+ }
+ return null;
+ }
+
+ async open(context, target) {
+ const metadata = await context.metadata('acuity-metadata.json');
+ return new acuity.Model(metadata, target);
+ }
+};
+
+acuity.Model = class {
+
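+    // 'data' and 'quantization' are forwarded to the graph but unused there;
+    // ModelFactory.open() currently passes only the metadata and the parsed target.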
+ constructor(metadata, model, data, quantization) {
+ this.name = model.MetaData.Name;
+ this.format = `Acuity v${model.MetaData.AcuityVersion}`;
+ this.runtime = model.MetaData.Platform;
+ this.graphs = [ new acuity.Graph(metadata, model, data, quantization) ];
+ }
+};
+
+acuity.Graph = class {
+
+ constructor(metadata, model) {
+ this.nodes = [];
+ this.inputs = [];
+ this.outputs = [];
+ const values = new Map();
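+        // Lazily create one placeholder record per tensor name; layer outputs are
+        // keyed '@<layer>:<port>' so later passes can resolve the producing layer.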
+ const value = (name) => {
+ if (!values.has(name)) {
+ values.set(name, { name: name, shape: null });
+ }
+ return values.get(name);
+ };
+ for (const [name, layer] of Object.entries(model.Layers)) {
+            layer.inputs = layer.inputs.map((input) => value(input));
+ layer.outputs = layer.outputs.map((port) => {
+ const output = value(`@${name}:${port}`);
+ let shape = null;
+ if (layer.op.toLowerCase() == 'input' ||
+ layer.op.toLowerCase() == 'variable') {
+ if (Object.prototype.hasOwnProperty.call(layer.parameters, 'shape') && layer.parameters.shape.length > 0) {
+ shape = layer.parameters.shape;
+ } else if (Object.prototype.hasOwnProperty.call(layer.parameters, 'size') && Object.prototype.hasOwnProperty.call(layer.parameters, 'channels')) {
+ const sizes = layer.parameters.size.split(' ');
+ shape = [0, parseInt(sizes[0]), parseInt(sizes[1]), layer.parameters.channels];
+ }
+ if (shape && shape.length === 4 && shape[0] === 0) {
+ shape[0] = 1;
+ }
+ }
+ output.shape = shape;
+ return output;
+ });
+ }
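+        // Propagate static shapes through the remaining layers (see acuity.Inference).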
+ acuity.Inference.infer(model.Layers);
+ for (const [name, obj] of values) {
+ const type = new acuity.TensorType(null, new acuity.TensorShape(obj.shape));
+ const value = new acuity.Value(name, type, null, null);
+ values.set(name, value);
+ }
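+        // 'input'/'output' layers become graph-level bindings; everything else is a node.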
+ for (const [name, layer] of Object.entries(model.Layers)) {
+ switch (layer.op.toLowerCase()) {
+ case 'input': {
+ const value = values.get(layer.outputs[0].name);
+ const argument = new acuity.Argument(name, [ value ]);
+ this.inputs.push(argument);
+ break;
+ }
+ case 'output': {
+ const value = values.get(layer.inputs[0].name);
+ const argument = new acuity.Argument(name, [ value ]);
+ this.outputs.push(argument);
+ break;
+ }
+ default: {
+ const node = new acuity.Node(metadata, name, layer, values);
+ this.nodes.push(node);
+ break;
+ }
+ }
+ }
+ }
+};
+
+acuity.Node = class {
+
+ constructor(metadata, name, layer, values) {
+ const op = layer.op;
+ this.name = name;
+ this.type = metadata.type(op) || { name: op };
+ this.inputs = [];
+ this.outputs = [];
+ this.attributes = [];
+ if (this.type) {
+ if (layer.parameters) {
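+                // Hide attributes that still equal their metadata default.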
+ for (const [name, value] of Object.entries(layer.parameters)) {
+ const meta = metadata.attribute(op, name);
+ const type = meta && meta.type ? meta.type : null;
+ const visible = meta && meta.default !== undefined && meta.default === value ? false : true;
+ const attribute = new acuity.Argument(name, value, type, visible);
+ this.attributes.push(attribute);
+ }
+ }
+ }
+ for (let i = 0; i < layer.inputs.length; i++) {
+ const input = layer.inputs[i];
+ const value = values.get(input.name);
+ const name = this.type && this.type.inputs && i < this.type.inputs.length ? this.type.inputs[i].name : `input${i}`;
+ const argument = new acuity.Argument(name, [ value ]);
+ this.inputs.push(argument);
+ }
+
+ if (this.type && this.type.constants) {
+ for (const constant of this.type.constants) {
+                // Constants carry no tensor data in the model file; expose them as unnamed placeholder initializers.
+ const type = new acuity.TensorType(null, new acuity.TensorShape(null));
+ const value = new acuity.Value('', type, null, new acuity.Tensor(type));
+ const argument = new acuity.Argument(constant.name, [ value ]);
+ this.inputs.push(argument);
+ }
+ }
+
+ for (let i = 0; i < layer.outputs.length; i++) {
+ const output = layer.outputs[i];
+ const value = values.get(output.name);
+ const name = this.type && this.type.outputs && i < this.type.outputs.length ? this.type.outputs[i].name : `output${i}`;
+ const argument = new acuity.Argument(name, [ value ]);
+ this.outputs.push(argument);
+ }
+ }
+};
+
+acuity.Argument = class {
+
+ constructor(name, value, type, visible) {
+ this.name = name;
+ this.value = value;
+ if (type) {
+ this.type = type;
+ }
+ if (visible === false) {
+ this.visible = false;
+ }
+ }
+};
+
+acuity.Value = class {
+
+ constructor(name, type, quantization, initializer) {
+ if (typeof name !== 'string') {
+ throw new acuity.Error(`Invalid value identifier '${JSON.stringify(name)}'.`);
+ }
+ this.name = name;
+ this.type = type || null;
+ this.quantization = quantization || null;
+ this.initializer = initializer || null;
+ }
+};
+
+acuity.TensorType = class {
+
+ constructor(dataType, shape) {
+ this.dataType = dataType || '?';
+ this.shape = shape;
+ }
+
+ toString() {
+ return (this.dataType || '?') + this.shape.toString();
+ }
+};
+
+acuity.TensorShape = class {
+
+ constructor(dimensions) {
+ this.dimensions = Array.isArray(dimensions) && dimensions.length == 1 && dimensions[0] == 0 ? [] : dimensions;
+ }
+
+ toString() {
+ if (!Array.isArray(this.dimensions) || this.dimensions.length == 0 || (this.dimensions.length == 1 && this.dimensions[0] == 0)) {
+ return '';
+ }
+ return `[${this.dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',')}]`;
+ }
+};
+
+acuity.Tensor = class {
+
+ constructor(type) {
+ this.type = type;
+ this.category = 'Constant';
+ }
+};
+
+acuity.Inference = class {
+
+ static infer(layers) {
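+ // Demand-driven shape inference: starting from each output layer, recursively
+ // resolve producer input shapes, then apply a per-operator rule (see the
+ // 'operators' map below) to fill in output shapes.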
+ const outputs = new Map();
+ const outputLayers = [];
+ for (const [, layer] of Object.entries(layers)) {
+ if (layer.op.toLowerCase() == 'output') {
+ outputLayers.push(layer);
+ }
+ for (const output of layer.outputs) {
+ outputs.set(output.name, layer);
+ }
+ }
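+ // Binary element-wise ops that follow NumPy-style broadcasting.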
+ const broadcasts = new Set([
+ 'add', 'equal', 'floor_mod', 'floor_div', 'greater', 'greater_equal', 'less', 'less_equal',
+ 'logical_and', 'logical_or', 'minimum', 'multiply', 'not_equal', 'pow', 'real_div',
+ 'squared_difference', 'subtract'
+ ]);
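+ // Unary and normalization ops whose output shape equals the first input's shape.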
+ const passthroughs = new Set([
+ 'LocalResponseNormalization', 'a_times_b_plus_c', 'abs', 'batchnorm_single', 'batchnormalize',
+ 'cast', 'clipbyvalue', 'dequantize', 'dtype_converter', 'elu', 'exp', 'floor',
+ 'groupnormalize', 'hard_sigmoid', 'hard_swish', 'instancenormalize', 'l2normalize', 'l2normalizescale',
+ 'layernormalize', 'leakyrelu', 'log', 'log_softmax', 'mish', 'neg', 'norm_with_channel_mean',
+ 'norm_with_min_max', 'norm_with_scale', 'pow', 'prelu', 'quantize', 'relu', 'relu_keras',
+ 'relun', 'reverse', 'round', 'rsqrt', 'sigmoid', 'sin', 'softmax', 'softrelu', 'sqrt', 'square', 'tanh'
+ ]);
+ const reduces = new Set([
+ 'reduceany', 'reducemax', 'reducemean', 'reducemin', 'reduceprod', 'reducesum'
+ ]);
+ const operators = new Map();
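+ // Each entry maps an op name to a rule: (input shapes, layer parameters) -> array of output shapes.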
+ operators.set('broadcast', ([a, b]) => {
+ const longer = a.length >= b.length ? a.slice() : b.slice();
+ const shorter = a.length < b.length ? a.slice() : b.slice();
+ const remain = longer.length - shorter.length;
+ for (let i = 0; i < remain; i++) {
+ shorter.splice(0, 0, 1);
+ }
+ for (let i = 0; i < longer.length; i++) {
+ longer[i] = longer[i] > shorter[i] ? longer[i] : shorter[i];
+ }
+ return [ longer ];
+ });
+ operators.set('concat', (inputs, params) => {
+ const outputShape = inputs[0].slice();
+ outputShape[params.dim] = 0;
+ for (const shape of inputs) {
+ outputShape[params.dim] += shape[params.dim];
+ }
+ return [ outputShape ];
+ });
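+ // Convolution/pooling output size (~~ is floor division):
+ // VALID: (in + stride - ksize) / stride, SAME: (in + stride - 1) / stride.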
+ operators.set('conv1d', (inputs, params) => {
+ if (params.padding == 'VALID') {
+ const out_h = ~~((inputs[0][1] + params.stride - params.ksize) / params.stride);
+ return [ [ inputs[0][0], out_h, params.weights ] ];
+ } else if (params.padding == 'SAME') {
+ const out_h = ~~((inputs[0][1] + params.stride - 1) / params.stride);
+ return [ [ inputs[0][0], out_h, params.weights ] ];
+ }
+ return null;
+ });
+ operators.set('convolution', (inputs, params) => {
+ if (params.padding == 'VALID') {
+ const out_h = ~~((inputs[0][1] + params.stride_h + params.pad[0] + params.pad[1] - params.ksize_h) / params.stride_h);
+ const out_w = ~~((inputs[0][2] + params.stride_w + params.pad[2] + params.pad[3] - params.ksize_w) / params.stride_w);
+ return [ [ inputs[0][0], out_h, out_w, params.weights ] ];
+ } else if (params.padding == 'SAME') {
+ const out_h = ~~((inputs[0][1] + params.stride_h - 1) / params.stride_h);
+ const out_w = ~~((inputs[0][2] + params.stride_w - 1) / params.stride_w);
+ return [ [ inputs[0][0], out_h, out_w, params.weights ] ];
+ }
+ return null;
+ });
+ operators.set('deconvolution', (inputs, params) => {
+ return [ params.output_shape.map((item, index) => item == 0 ? inputs[0][index] : item) ];
+ });
+ operators.set('fullconnect', (inputs, params) => {
+ return [ inputs[0].slice(0, params.axis).concat([params.weights]) ];
+ });
+ operators.set('gather', (inputs, params) => {
+ // gather output shape: input.shape[:axis] + indices.shape + input.shape[axis+1:]
+ const prefix = inputs[0].slice(0, params.axis).concat(inputs[1]);
+ const suffix = inputs[0].slice(params.axis + 1);
+ return [ prefix.concat(suffix) ];
+ });
+ operators.set('lstm', (inputs, params) => {
+ const [input] = inputs;
+ const [a, b] = input;
+ let batch = a;
+ const output = params.num_proj != null ? params.num_proj : params.weights;
+ if (params.time_major) {
+ batch = b;
+ }
+ const newShape = params.return_sequences ? [ a, b, output ] : [ batch, output ];
+ return [ newShape, [batch, output], [batch, params.weights] ];
+ });
+ operators.set('matmul', ([a, b], params) => {
+ let newShape = a.slice(0, -2);
+ if (params.transpose_a) {
+ newShape = newShape.concat(a.slice(-1));
+ } else {
+ newShape = newShape.concat(a.slice(-2, -1));
+ }
+ if (params.transpose_b) {
+ newShape = newShape.concat(b.slice(-2, -1));
+ } else {
+ newShape = newShape.concat(b.slice(-1));
+ }
+ return [ newShape ];
+ });
+ operators.set('pad', (inputs, params) => {
+ return [ inputs[0].map((item, index) => item + params.padding_value[index][0] + params.padding_value[index][1]) ];
+ });
+ operators.set('permute', (inputs, params) => {
+ return [ params.perm.map((axis) => inputs[0][axis]) ];
+ });
+ operators.set('pooling', (inputs, params) => {
+ if (params.padding == 'VALID') {
+ const out_h = ~~((inputs[0][1] + params.stride_h - params.ksize_h) / params.stride_h);
+ const out_w = ~~((inputs[0][2] + params.stride_w - params.ksize_w) / params.stride_w);
+ return [ [inputs[0][0], out_h, out_w, inputs[0][3]] ];
+ } else if (params.padding == 'SAME') {
+ const out_h = ~~((inputs[0][1] + params.stride_h - 1) / params.stride_h);
+ const out_w = ~~((inputs[0][2] + params.stride_w - 1) / params.stride_w);
+ return [ [inputs[0][0], out_h, out_w, inputs[0][3]] ];
+ }
+ return null;
+ });
+ operators.set('reduce', (inputs, params) => {
+ const newShape = inputs[0].slice();
+ if (params.keep_dims) {
+ // Normalize negative axes, then collapse each reduced axis to 1.
+ for (const item of params.axis_list) {
+ const axis = item < 0 ? newShape.length + item : item;
+ newShape[axis] = 1;
+ }
+ } else {
+ const axis_list = params.axis_list.map((item) => {
+ return item < 0 ? newShape.length + item : item;
+ });
+ axis_list.sort((a, b) => {
+ return b - a;
+ });
+ for (const item of axis_list) {
+ newShape.splice(item, 1);
+ }
+ if (!newShape.length) {
+ newShape.splice(0, 0, 0);
+ }
+ }
+ return [ newShape ];
+ });
+ operators.set('repeat', (inputs, params) => {
+ const newShape = inputs[0].slice();
+ newShape[params.axis] = params.maxlen;
+ return [ newShape ];
+ });
+ operators.set('reshape', (inputs, params) => {
+ // In the target shape, 0 copies the corresponding input dimension and -1 is inferred.
+ const negativeIndexes = [];
+ let shape = params.shape;
+ if (typeof params.shape === 'string') {
+ shape = params.shape.split(/\s+/).map((item) => parseInt(item, 10));
+ }
+ const newShape = shape.map((item, index) => {
+ if (item == 0) {
+ return inputs[0][index];
+ }
+ if (item == -1) {
+ negativeIndexes.push(index);
+ return 1;
+ }
+ return item;
+ });
+ if (negativeIndexes.length > 0) {
+ newShape[negativeIndexes[0]] = inputs[0].reduce((a, c) => a * c) / newShape.reduce((a, c) => a * c);
+ }
+ return [ newShape ];
+ });
+ operators.set('sequence_mask', (inputs, params) => {
+ return [ inputs[0].slice().concat([params.maxlen]) ];
+ });
+ operators.set('slice', (inputs, params) => {
+ return [ params.size.map((item, index) => item == -1 ? inputs[0][index] : item) ];
+ });
+ operators.set('squeeze', (inputs, params) => {
+ const newShape = inputs[0].slice();
+ const axis_list = [...new Set(params.axis_list)].sort((a, b) => b - a);
+ for (const item of axis_list) {
+ newShape.splice(item, 1);
+ }
+ return [ newShape ];
+ });
+ operators.set('space2depth', (inputs, params) => {
+ const h = inputs[0][1] / params.block_size[0];
+ const w = inputs[0][2] / params.block_size[1];
+ const c = inputs[0][3] * params.block_size[0] * params.block_size[1];
+ return [ [inputs[0][0], h, w, c] ];
+ });
+ operators.set('split', (inputs, params) => {
+ const sizes = [];
+ const slices = params.slices.slice();
+ slices.splice(0, 0, 0);
+ slices.push(inputs[0][params.dim]);
+ slices.reduce((a, b) => {
+ sizes.push(b - a);
+ return b;
+ });
+ return sizes.map((item) => {
+ const shape = inputs[0].slice();
+ shape[params.dim] = item;
+ return shape;
+ });
+ });
+ operators.set('stack', (inputs, params) => {
+ const newShape = inputs[0].slice();
+ if (newShape.length == 1 && newShape[0] == 0) {
+ newShape[0] = 1;
+ } else {
+ newShape.splice(params.axis, 0, inputs.length);
+ }
+ return [ newShape ];
+ });
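+ // TensorFlow-style strided slice: the begin/end/new-axis/shrink-axis masks are
+ // bit fields with one bit per dimension.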
+ operators.set('stridedslice', (inputs, params) => {
+ const input_shape = inputs[0].slice();
+ const begin = params.slice_begin.slice();
+ const end = params.slice_end.slice();
+ if (params.slice_begin_mask > 0) {
+ for (let i = 0; i < begin.length; i++) {
+ if ((params.slice_begin_mask >>> i) & 0x1) {
+ begin[i] = -1;
+ }
+ }
+ }
+ if (params.slice_end_mask > 0) {
+ for (let i = 0; i < end.length; i++) {
+ if ((params.slice_end_mask >>> i) & 0x1) {
+ end[i] = -1;
+ }
+ }
+ }
+ for (let i = 0; i < begin.length; i++) {
+ if (begin[i] == -1) {
+ begin[i] = 0;
+ }
+ }
+ if (inputs[0].length == end.length) {
+ for (let i = 0; i < end.length; i++) {
+ if (end[i] == -1 || end[i] > input_shape[i]) {
+ end[i] = input_shape[i];
+ }
+ }
+ } else if (inputs[0].length < end.length) {
+ if (params.slice_new_axis_mask) {
+ const len = (params.slice_new_axis_mask >>> 0).toString(2).length;
+ for (let i = 0; i < len; i++) {
+ if ((params.slice_new_axis_mask >>> i) & 0x1) {
+ input_shape.splice(i, 0, 1);
+ }
+ }
+ for (let i = 0; i < end.length; i++) {
+ if (end[i] == -1) {
+ end[i] = input_shape[i];
+ }
+ }
+ }
+ }
+ const newShape = [];
+ for (let i = 0; i < begin.length; i++) {
+ // Sliced extent per dimension: ceil((end - begin) / stride).
+ newShape.push(Math.ceil((end[i] - begin[i]) / params.slice_strides[i]));
+ }
+ if (params.slice_shrink_axis_mask) {
+ const len = (params.slice_shrink_axis_mask >>> 0).toString(2).length;
+ for (let i = 0; i < len; i++) {
+ if ((params.slice_shrink_axis_mask >>> i) & 0x1) {
+ newShape.splice(i, 1);
+ }
+ }
+ }
+ if (params.slice_new_axis_mask) {
+ const len = (params.slice_new_axis_mask >>> 0).toString(2).length;
+ for (let i = 0; i < len; i++) {
+ if ((params.slice_new_axis_mask >>> i) & 0x1) {
+ if (inputs[0].length == begin.length) {
+ newShape.splice(i, 0, 1);
+ } else if (inputs[0].length < begin.length) {
+ newShape[i] = 1;
+ }
+ }
+ }
+ }
+ return [ newShape ];
+ });
+ const infer = (output) => {
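+ // Find the layer that produces this value; infer its input shapes first,
+ // then compute its output shapes once all inputs are known.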
+ if (outputs.has(output.name)) {
+ let ready = true;
+ const layer = outputs.get(output.name);
+ for (const input of layer.inputs) {
+ if (input.shape === null) {
+ infer(input);
+ if (input.shape === null) {
+ ready = false;
+ break;
+ }
+ }
+ }
+ if (ready) {
+ let callback = null;
+ if (operators.has(layer.op)) {
+ callback = operators.get(layer.op);
+ } else if (passthroughs.has(layer.op)) {
+ callback = (inputs) => [ inputs[0].slice() ];
+ } else if (broadcasts.has(layer.op)) {
+ callback = operators.get('broadcast');
+ } else if (reduces.has(layer.op)) {
+ callback = operators.get('reduce');
+ } else {
+ callback = () => [];
+ }
+ const parameters = layer.parameters;
+ const inputs = layer.inputs.map((input) => input.shape);
+ const outputs = callback(inputs, parameters);
+ for (let i = 0; i < outputs.length; i++) {
+ if (i < layer.outputs.length) {
+ layer.outputs[i].shape = outputs[i];
+ }
+ }
+ }
+ }
+ };
+ for (const layer of outputLayers) {
+ for (const output of layer.outputs) {
+ infer(output);
+ }
+ }
+ }
+};
+
+acuity.Error = class extends Error {
+
+ constructor(message) {
+ super(message);
+ this.name = 'Error loading Acuity model.';
+ }
+};
+
+export const ModelFactory = acuity.ModelFactory;
diff --git a/armnn-metadata.json b/armnn-metadata.json
new file mode 100644
index 00000000000..1370de24b12
--- /dev/null
+++ b/armnn-metadata.json
@@ -0,0 +1,422 @@
+[
+ {
+ "name": "ActivationLayer",
+ "category": "Activation",
+ "attributes": [
+ { "name": "activationFunction", "type": "ActivationFunction" },
+ { "name": "a", "type": "float32" },
+ { "name": "b", "type": "float32" }
+ ]
+ },
+ {
+ "name": "AdditionLayer",
+ "inputs": [
+ { "name": "A" },
+ { "name": "B" }
+ ],
+ "outputs": [
+ { "name": "C" }
+ ]
+ },
+ {
+ "name": "BatchNormalizationLayer",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "eps", "type": "float32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "mean" },
+ { "name": "variance" },
+ { "name": "beta" },
+ { "name": "gamma" }
+ ]
+ },
+ {
+ "name": "BatchToSpaceNdLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "blockShape", "type": "string" },
+ { "name": "crops", "type": "string" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "ConcatLayer",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "concatAxis", "type": "uint32" },
+ { "name": "numViews", "type": "uint32" },
+ { "name": "numDimensions", "type": "uint32" }
+ ]
+ },
+ {
+ "name": "ConstantLayer",
+ "category": "Constant",
+ "inputs": [
+ { "name": "input" }
+ ]
+ },
+ {
+ "name": "Convolution2dLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "padTop", "type": "uint32" },
+ { "name": "padRight", "type": "uint32" },
+ { "name": "padBottom", "type": "uint32" },
+ { "name": "padLeft", "type": "uint32" },
+ { "name": "strideX", "type": "uint32" },
+ { "name": "strideY", "type": "uint32" },
+ { "name": "dilationX", "type": "uint32" },
+ { "name": "dilationY", "type": "uint32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weights" },
+ { "name": "biases" }
+ ]
+ },
+ {
+ "name": "DepthwiseConvolution2dLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "padTop", "type": "uint32" },
+ { "name": "padRight", "type": "uint32" },
+ { "name": "padBottom", "type": "uint32" },
+ { "name": "padLeft", "type": "uint32" },
+ { "name": "strideX", "type": "uint32" },
+ { "name": "strideY", "type": "uint32" },
+ { "name": "dilationX", "type": "uint32" },
+ { "name": "dilationY", "type": "uint32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weights" },
+ { "name": "biases" }
+ ]
+ },
+ {
+ "name": "DequantizeLayer"
+ },
+ {
+ "name": "DetectionPostProcessLayer",
+ "category": "Custom",
+ "attributes": [
+ { "name": "maxDetections", "type": "uint32" },
+ { "name": "maxClassesPerDetection", "type": "uint32" },
+ { "name": "detectionsPerClass", "type": "uint32" },
+ { "name": "nmsScoreThreshold", "type": "float32" },
+ { "name": "numIouThreshold", "type": "float32" },
+ { "name": "numClasses", "type": "uint32" },
+ { "name": "useRegularNms", "type": "boolean" },
+ { "name": "scaleX", "type": "float32" },
+ { "name": "scaleY", "type": "float32" },
+ { "name": "scaleW", "type": "float32" },
+ { "name": "scaleH", "type": "float32" }
+ ]
+ },
+ {
+ "name": "DivisionLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "EqualLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "FloorLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "FullyConnectedLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "transposeWeightsMatrix", "type": "boolean" }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weights" },
+ { "name": "biases" }
+ ]
+ },
+ {
+ "name": "GatherLayer",
+ "category": "Tensor"
+ },
+ {
+ "name": "GreaterLayer",
+ "category": "Layer",
+ "attributes": []
+ },
+ {
+ "name": "InputLayer"
+ },
+ {
+ "name": "L2NormalizationLayer",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "eps", "type": "float32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "LstmLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "activationFunc", "type": "uint32" },
+ { "name": "clippingThresCell", "type": "float32" },
+ { "name": "clippingThresProj", "type": "float32" },
+ { "name": "cifgEnabled", "type": "boolean" },
+ { "name": "peepholeEnabled", "type": "boolean" },
+ { "name": "projectionEnabled", "type": "boolean" },
+ { "name": "layerNormEnabled", "type": "boolean" }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "inputToForgetWeights1" },
+ { "name": "inputToCellWeights1" },
+ { "name": "inputToOutputWeights1" },
+ { "name": "recurrentToForgetWeights1" },
+ { "name": "recurrentToCellWeights1" },
+ { "name": "recurrentToOutputWeights1" },
+ { "name": "forgetGateBias1" },
+ { "name": "cellBias1" },
+ { "name": "outputGateBias1" },
+ { "name": "inputToInputWeights1" },
+ { "name": "recurrentToInputWeights1" },
+ { "name": "cellToInputWeights1" },
+ { "name": "inputGateBias1" },
+ { "name": "projectionWeights1" },
+ { "name": "projectionBias1" },
+ { "name": "cellToForgetWeights1" },
+ { "name": "cellToOutputWeights1" },
+ { "name": "inputLayerNormWeights1" },
+ { "name": "forgetLayerNormWeights1" },
+ { "name": "cellLayerNormWeights1" },
+ { "name": "outputLayerNormWeights1" }
+ ]
+ },
+ {
+ "name": "MaximumLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "MeanLayer",
+ "attributes": [
+ { "name": "axis", "type": "uint32" },
+ { "name": "keepDims", "type": "boolean" }
+ ]
+ },
+ {
+ "name": "MergeLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "MergerLayer",
+ "category": "Tensor"
+ },
+ {
+ "name": "MinimumLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "MultiplicationLayer",
+ "inputs": [
+ { "name": "A" },
+ { "name": "B" }
+ ],
+ "outputs": [
+ { "name": "C" }
+ ]
+ },
+ {
+ "name": "NormalizationLayer",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "normChannelType", "type": "NormalizationAlgorithmChannel" },
+ { "name": "normMethodType", "type": "NormalizationAlgorithmMethod" },
+ { "name": "normSize", "type": "uint32" },
+ { "name": "alpha", "type": "float32" },
+ { "name": "beta", "type": "float32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "OutputLayer",
+ "category": "Tensor"
+ },
+ {
+ "name": "PadLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "padList", "type": "uint32" },
+ { "name": "padValue", "type": "float32" }
+ ]
+ },
+ {
+ "name": "PermuteLayer",
+ "category": "Shape",
+ "attributes": [
+ { "name": "dimMappings", "type": "string" }
+ ]
+ },
+ {
+ "name": "Pooling2dLayer",
+ "category": "Pool",
+ "attributes": [
+ { "name": "poolType", "type": "PoolingAlgorithm" },
+ { "name": "padTop", "type": "uint32" },
+ { "name": "padRight", "type": "uint32" },
+ { "name": "padBottom", "type": "uint32" },
+ { "name": "padLeft", "type": "uint32" },
+ { "name": "poolWidth", "type": "uint32" },
+ { "name": "poolHeight", "type": "uint32" },
+ { "name": "strideX", "type": "uint32" },
+ { "name": "strideY", "type": "uint32" },
+ { "name": "outputShapeRounding", "type": "OutputShapeRounding" },
+ { "name": "paddingMethod", "type": "PaddingMethod" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "PreluLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "QuantizedLstmLayer",
+ "category": "Layer",
+ "inputs": [
+ { "name": "input" },
+ { "name": "inputToInputWeights1" },
+ { "name": "inputToForgetWeights1" },
+ { "name": "inputToCellWeights1" },
+ { "name": "inputToOutputWeights1" },
+ { "name": "recurrentToInputWeights1" },
+ { "name": "recurrentToForgetWeights1" },
+ { "name": "recurrentToCellWeights1" },
+ { "name": "recurrentToOutputWeights1" },
+ { "name": "inputGateBias1" },
+ { "name": "forgetGateBias1" },
+ { "name": "cellBias1" },
+ { "name": "outputGateBias1" }
+ ]
+ },
+ {
+ "name": "QuantizeLayer"
+ },
+ {
+ "name": "ReshapeLayer",
+ "category": "Shape",
+ "attributes": [
+ { "name": "targetShape", "type": "uint32[]" }
+ ]
+ },
+ {
+ "name": "ResizeBilinearLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "targetWidth", "type": "uint32" },
+ { "name": "targetHeight", "type": "uint32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "ResizeLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "targetWidth", "type": "uint32" },
+ { "name": "targetHeight", "type": "uint32" },
+ { "name": "method", "type": "ResizeMethod" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "RsqrtLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "SoftmaxLayer",
+ "category": "Activation",
+ "attributes": [
+ { "name": "beta", "type": "float32" }
+ ]
+ },
+ {
+ "name": "SpaceToBatchNdLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "blockShape", "type": "string" },
+ { "name": "padList", "type": "string" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "SpaceToDepthLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "blockSize", "type": "uint32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "SplitterLayer",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "concatAxis", "type": "uint32" },
+ { "name": "numViews", "type": "uint32" },
+ { "name": "numDimensions", "type": "uint32" }
+ ]
+ },
+ {
+ "name": "StackLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "axis", "type": "uint32" },
+ { "name": "numInputs", "type": "uint32" },
+ { "name": "inputShape", "type": "uint32" }
+ ]
+ },
+ {
+ "name": "StridedSliceLayer",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "begin", "type": "int32" },
+ { "name": "end", "type": "int32" },
+ { "name": "stride", "type": "int32" },
+ { "name": "beginMask", "type": "int32" },
+ { "name": "endMask", "type": "int32" },
+ { "name": "shrinkAxisMask", "type": "int32" },
+ { "name": "ellipsisMask", "type": "int32" },
+ { "name": "newAxisMask", "type": "int32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ]
+ },
+ {
+ "name": "SubtractionLayer"
+ },
+ {
+ "name": "SwitchLayer",
+ "category": "Layer"
+ },
+ {
+ "name": "TransposeConvolution2dLayer",
+ "category": "Layer",
+ "attributes": [
+ { "name": "padTop", "type": "uint32" },
+ { "name": "padRight", "type": "uint32" },
+ { "name": "padBottom", "type": "uint32" },
+ { "name": "padLeft", "type": "uint32" },
+ { "name": "strideX", "type": "uint32" },
+ { "name": "strideY", "type": "uint32" },
+ { "name": "dataLayout", "type": "DataLayout" }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weights" },
+ { "name": "biases" }
+ ]
+ }
+]
\ No newline at end of file
diff --git a/armnn-schema.js b/armnn-schema.js
new file mode 100644
index 00000000000..fe7da30d97a
--- /dev/null
+++ b/armnn-schema.js
@@ -0,0 +1,2504 @@
+
+import * as flatbuffers from './flatbuffers.js';
+
+const $root = flatbuffers.get('armnn');
+
+$root.armnnSerializer = $root.armnnSerializer || {};
+
+$root.armnnSerializer.ActivationFunction = {
+ Sigmoid: 0,
+ TanH: 1,
+ Linear: 2,
+ ReLu: 3,
+ BoundedReLu: 4,
+ SoftReLu: 5,
+ LeakyReLu: 6,
+ Abs: 7,
+ Sqrt: 8,
+ Square: 9,
+ Elu: 10,
+ HardSwish: 11
+};
+
+$root.armnnSerializer.ArgMinMaxFunction = {
+ Min: 0,
+ Max: 1
+};
+
+$root.armnnSerializer.DataType = {
+ Float16: 0,
+ Float32: 1,
+ QuantisedAsymm8: 2,
+ Signed32: 3,
+ Boolean: 4,
+ QuantisedSymm16: 5,
+ QAsymmU8: 6,
+ QSymmS16: 7,
+ QAsymmS8: 8,
+ QSymmS8: 9
+};
+
+$root.armnnSerializer.DataLayout = {
+ NHWC: 0,
+ NCHW: 1
+};
+
+$root.armnnSerializer.ResizeMethod = {
+ NearestNeighbor: 0,
+ Bilinear: 1
+};
+
+$root.armnnSerializer.TensorInfo = class TensorInfo {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.TensorInfo();
+ $.dimensions = reader.typedArray(position, 4, Uint32Array);
+ $.dataType = reader.int8_(position, 6, 0);
+ $.quantizationScale = reader.float32_(position, 8, 1);
+ $.quantizationOffset = reader.int32_(position, 10, 0);
+ $.quantizationScales = reader.typedArray(position, 12, Float32Array);
+ $.quantizationDim = reader.uint32_(position, 14, 0);
+ $.dimensionality = reader.uint32_(position, 16, 1);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.TensorInfo();
+ $.dimensions = reader.typedArray(json.dimensions, Uint32Array);
+ $.dataType = $root.armnnSerializer.DataType[json.dataType];
+ $.quantizationScale = reader.value(json.quantizationScale, 1);
+ $.quantizationOffset = reader.value(json.quantizationOffset, 0);
+ $.quantizationScales = reader.typedArray(json.quantizationScales, Float32Array);
+ $.quantizationDim = reader.value(json.quantizationDim, 0);
+ $.dimensionality = reader.value(json.dimensionality, 1);
+ return $;
+ }
+};
+
+$root.armnnSerializer.Connection = class Connection {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.Connection();
+ $.sourceLayerIndex = reader.uint32(position + 0);
+ $.outputSlotIndex = reader.uint32(position + 4);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.Connection();
+ $.sourceLayerIndex = json.sourceLayerIndex;
+ $.outputSlotIndex = json.outputSlotIndex;
+ return $;
+ }
+};
+
+$root.armnnSerializer.ByteData = class ByteData {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ByteData();
+ $.data = reader.typedArray(position, 4, Int8Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ByteData();
+ $.data = reader.typedArray(json.data, Int8Array);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ShortData = class ShortData {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ShortData();
+ $.data = reader.typedArray(position, 4, Int16Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ShortData();
+ $.data = reader.typedArray(json.data, Int16Array);
+ return $;
+ }
+};
+
+$root.armnnSerializer.IntData = class IntData {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.IntData();
+ $.data = reader.typedArray(position, 4, Int32Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.IntData();
+ $.data = reader.typedArray(json.data, Int32Array);
+ return $;
+ }
+};
+
+$root.armnnSerializer.LongData = class LongData {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.LongData();
+ $.data = reader.int64s_(position, 4);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.LongData();
+ $.data = reader.array(json.data);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ConstTensorData = class {
+
+ static decode(reader, position, type) {
+ switch (type) {
+ case 1: return $root.armnnSerializer.ByteData.decode(reader, position);
+ case 2: return $root.armnnSerializer.ShortData.decode(reader, position);
+ case 3: return $root.armnnSerializer.IntData.decode(reader, position);
+ case 4: return $root.armnnSerializer.LongData.decode(reader, position);
+ default: return undefined;
+ }
+ }
+
+ static decodeText(reader, json, type) {
+ switch (type) {
+ case 'ByteData': return $root.armnnSerializer.ByteData.decodeText(reader, json);
+ case 'ShortData': return $root.armnnSerializer.ShortData.decodeText(reader, json);
+ case 'IntData': return $root.armnnSerializer.IntData.decodeText(reader, json);
+ case 'LongData': return $root.armnnSerializer.LongData.decodeText(reader, json);
+ default: return undefined;
+ }
+ }
+};
+
+$root.armnnSerializer.ConstTensor = class ConstTensor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ConstTensor();
+ $.info = reader.table(position, 4, $root.armnnSerializer.TensorInfo.decode);
+ $.data = reader.union(position, 6, $root.armnnSerializer.ConstTensorData.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ConstTensor();
+ $.info = reader.object(json.info, $root.armnnSerializer.TensorInfo.decodeText);
+ $.data = $root.armnnSerializer.ConstTensorData.decodeText(reader, json.data, json.data_type);
+ return $;
+ }
+};
+
+$root.armnnSerializer.InputSlot = class InputSlot {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.InputSlot();
+ $.index = reader.uint32_(position, 4, 0);
+ $.connection = reader.struct(position, 6, $root.armnnSerializer.Connection.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.InputSlot();
+ $.index = reader.value(json.index, 0);
+ $.connection = reader.object(json.connection, $root.armnnSerializer.Connection.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.OutputSlot = class OutputSlot {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.OutputSlot();
+ $.index = reader.uint32_(position, 4, 0);
+ $.tensorInfo = reader.table(position, 6, $root.armnnSerializer.TensorInfo.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.OutputSlot();
+ $.index = reader.value(json.index, 0);
+ $.tensorInfo = reader.object(json.tensorInfo, $root.armnnSerializer.TensorInfo.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.LayerType = {
+ Addition: 0,
+ Input: 1,
+ Multiplication: 2,
+ Output: 3,
+ Pooling2d: 4,
+ Reshape: 5,
+ Softmax: 6,
+ Convolution2d: 7,
+ DepthwiseConvolution2d: 8,
+ Activation: 9,
+ Permute: 10,
+ FullyConnected: 11,
+ Constant: 12,
+ SpaceToBatchNd: 13,
+ BatchToSpaceNd: 14,
+ Division: 15,
+ Minimum: 16,
+ Equal: 17,
+ Maximum: 18,
+ Normalization: 19,
+ Pad: 20,
+ Rsqrt: 21,
+ Floor: 22,
+ BatchNormalization: 23,
+ Greater: 24,
+ ResizeBilinear: 25,
+ Subtraction: 26,
+ StridedSlice: 27,
+ Gather: 28,
+ Mean: 29,
+ Merger: 30,
+ L2Normalization: 31,
+ Splitter: 32,
+ DetectionPostProcess: 33,
+ Lstm: 34,
+ Quantize: 35,
+ Dequantize: 36,
+ Merge: 37,
+ Switch: 38,
+ Concat: 39,
+ SpaceToDepth: 40,
+ Prelu: 41,
+ TransposeConvolution2d: 42,
+ Resize: 43,
+ Stack: 44,
+ QuantizedLstm: 45,
+ Abs: 46,
+ ArgMinMax: 47,
+ Slice: 48,
+ DepthToSpace: 49,
+ InstanceNormalization: 50,
+ LogSoftmax: 51,
+ Comparison: 52,
+ StandIn: 53,
+ ElementwiseUnary: 54,
+ Transpose: 55,
+ QLstm: 56,
+ Fill: 57,
+ Rank: 58
+};
+
+$root.armnnSerializer.LayerBase = class LayerBase {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.LayerBase();
+ $.index = reader.uint32_(position, 4, 0);
+ $.layerName = reader.string_(position, 6, null);
+ $.layerType = reader.uint32_(position, 8, 0);
+ $.inputSlots = reader.tableArray(position, 10, $root.armnnSerializer.InputSlot.decode);
+ $.outputSlots = reader.tableArray(position, 12, $root.armnnSerializer.OutputSlot.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.LayerBase();
+ $.index = reader.value(json.index, 0);
+ $.layerName = reader.value(json.layerName, null);
+ $.layerType = $root.armnnSerializer.LayerType[json.layerType];
+ $.inputSlots = reader.objectArray(json.inputSlots, $root.armnnSerializer.InputSlot.decodeText);
+ $.outputSlots = reader.objectArray(json.outputSlots, $root.armnnSerializer.OutputSlot.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.BindableLayerBase = class BindableLayerBase {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.BindableLayerBase();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.layerBindingId = reader.int32_(position, 6, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.BindableLayerBase();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.layerBindingId = reader.value(json.layerBindingId, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.AbsLayer = class AbsLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.AbsLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.AbsLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ActivationLayer = class ActivationLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ActivationLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.ActivationDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ActivationLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ActivationDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ActivationDescriptor = class ActivationDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ActivationDescriptor();
+ $.activationFunction = reader.int8_(position, 4, 0);
+ $.a = reader.float32_(position, 6, 0);
+ $.b = reader.float32_(position, 8, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ActivationDescriptor();
+ $.activationFunction = $root.armnnSerializer.ActivationFunction[json.activationFunction];
+ $.a = reader.value(json.a, 0);
+ $.b = reader.value(json.b, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.AdditionLayer = class AdditionLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.AdditionLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.AdditionLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ArgMinMaxLayer = class ArgMinMaxLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ArgMinMaxLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.ArgMinMaxDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ArgMinMaxLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ArgMinMaxDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ArgMinMaxDescriptor = class ArgMinMaxDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ArgMinMaxDescriptor();
+ $.argMinMaxFunction = reader.int8_(position, 4, 0);
+ $.axis = reader.int32_(position, 6, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ArgMinMaxDescriptor();
+ $.argMinMaxFunction = $root.armnnSerializer.ArgMinMaxFunction[json.argMinMaxFunction];
+ $.axis = reader.value(json.axis, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ComparisonOperation = {
+ Equal: 0,
+ Greater: 1,
+ GreaterOrEqual: 2,
+ Less: 3,
+ LessOrEqual: 4,
+ NotEqual: 5
+};
+
+$root.armnnSerializer.ComparisonDescriptor = class ComparisonDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ComparisonDescriptor();
+ $.operation = reader.int8_(position, 4, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ComparisonDescriptor();
+ $.operation = $root.armnnSerializer.ComparisonOperation[json.operation];
+ return $;
+ }
+};
+
+$root.armnnSerializer.ComparisonLayer = class ComparisonLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ComparisonLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.ComparisonDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ComparisonLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ComparisonDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ConstantLayer = class ConstantLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ConstantLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.input = reader.table(position, 6, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ConstantLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.input = reader.object(json.input, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.Convolution2dLayer = class Convolution2dLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.Convolution2dLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.Convolution2dDescriptor.decode);
+ $.weights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ $.biases = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.Convolution2dLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.Convolution2dDescriptor.decodeText);
+ $.weights = reader.object(json.weights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.biases = reader.object(json.biases, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.Convolution2dDescriptor = class Convolution2dDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.Convolution2dDescriptor();
+ $.padLeft = reader.uint32_(position, 4, 0);
+ $.padRight = reader.uint32_(position, 6, 0);
+ $.padTop = reader.uint32_(position, 8, 0);
+ $.padBottom = reader.uint32_(position, 10, 0);
+ $.strideX = reader.uint32_(position, 12, 0);
+ $.strideY = reader.uint32_(position, 14, 0);
+ $.dilationX = reader.uint32_(position, 16, 1);
+ $.dilationY = reader.uint32_(position, 18, 1);
+ $.biasEnabled = reader.bool_(position, 20, false);
+ $.dataLayout = reader.int8_(position, 22, 1);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.Convolution2dDescriptor();
+ $.padLeft = reader.value(json.padLeft, 0);
+ $.padRight = reader.value(json.padRight, 0);
+ $.padTop = reader.value(json.padTop, 0);
+ $.padBottom = reader.value(json.padBottom, 0);
+ $.strideX = reader.value(json.strideX, 0);
+ $.strideY = reader.value(json.strideY, 0);
+ $.dilationX = reader.value(json.dilationX, 1);
+ $.dilationY = reader.value(json.dilationY, 1);
+ $.biasEnabled = reader.value(json.biasEnabled, false);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.DepthToSpaceLayer = class DepthToSpaceLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.DepthToSpaceLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.DepthToSpaceDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.DepthToSpaceLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.DepthToSpaceDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.DepthToSpaceDescriptor = class DepthToSpaceDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.DepthToSpaceDescriptor();
+ $.blockSize = reader.uint32_(position, 4, 0);
+ $.dataLayout = reader.int8_(position, 6, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.DepthToSpaceDescriptor();
+ $.blockSize = reader.value(json.blockSize, 0);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.DivisionLayer = class DivisionLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.DivisionLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.DivisionLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.UnaryOperation = {
+ Abs: 0,
+ Rsqrt: 1,
+ Sqrt: 2,
+ Exp: 3,
+ Neg: 4
+};
+
+$root.armnnSerializer.ElementwiseUnaryDescriptor = class ElementwiseUnaryDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ElementwiseUnaryDescriptor();
+ $.operation = reader.int8_(position, 4, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ElementwiseUnaryDescriptor();
+ $.operation = $root.armnnSerializer.UnaryOperation[json.operation];
+ return $;
+ }
+};
+
+$root.armnnSerializer.ElementwiseUnaryLayer = class ElementwiseUnaryLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ElementwiseUnaryLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.ElementwiseUnaryDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ElementwiseUnaryLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ElementwiseUnaryDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.EqualLayer = class EqualLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.EqualLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.EqualLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.FillLayer = class FillLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.FillLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.FillDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.FillLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.FillDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.FillDescriptor = class FillDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.FillDescriptor();
+ $.value = reader.float32_(position, 4, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.FillDescriptor();
+ $.value = reader.value(json.value, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.FloorLayer = class FloorLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.FloorLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.FloorLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.FullyConnectedLayer = class FullyConnectedLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.FullyConnectedLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.FullyConnectedDescriptor.decode);
+ $.weights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ $.biases = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.FullyConnectedLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.FullyConnectedDescriptor.decodeText);
+ $.weights = reader.object(json.weights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.biases = reader.object(json.biases, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.FullyConnectedDescriptor = class FullyConnectedDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.FullyConnectedDescriptor();
+ $.biasEnabled = reader.bool_(position, 4, false);
+ $.transposeWeightsMatrix = reader.bool_(position, 6, false);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.FullyConnectedDescriptor();
+ $.biasEnabled = reader.value(json.biasEnabled, false);
+ $.transposeWeightsMatrix = reader.value(json.transposeWeightsMatrix, false);
+ return $;
+ }
+};
+
+$root.armnnSerializer.GatherLayer = class GatherLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.GatherLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.GatherDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.GatherLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.GatherDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.GatherDescriptor = class GatherDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.GatherDescriptor();
+ $.axis = reader.int32_(position, 4, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.GatherDescriptor();
+ $.axis = reader.value(json.axis, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.GreaterLayer = class GreaterLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.GreaterLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.GreaterLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.InputLayer = class InputLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.InputLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.BindableLayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.InputLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.BindableLayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.InstanceNormalizationLayer = class InstanceNormalizationLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.InstanceNormalizationLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.InstanceNormalizationDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.InstanceNormalizationLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.InstanceNormalizationDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.InstanceNormalizationDescriptor = class InstanceNormalizationDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.InstanceNormalizationDescriptor();
+ $.gamma = reader.float32_(position, 4, 0);
+ $.beta = reader.float32_(position, 6, 0);
+ $.eps = reader.float32_(position, 8, 0);
+ $.dataLayout = reader.int8_(position, 10, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.InstanceNormalizationDescriptor();
+ $.gamma = reader.value(json.gamma, 0);
+ $.beta = reader.value(json.beta, 0);
+ $.eps = reader.value(json.eps, 0);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.LogSoftmaxLayer = class LogSoftmaxLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.LogSoftmaxLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.LogSoftmaxDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.LogSoftmaxLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.LogSoftmaxDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.LogSoftmaxDescriptor = class LogSoftmaxDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.LogSoftmaxDescriptor();
+ $.beta = reader.float32_(position, 4, 1);
+ $.axis = reader.int32_(position, 6, -1);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.LogSoftmaxDescriptor();
+ $.beta = reader.value(json.beta, 1);
+ $.axis = reader.value(json.axis, -1);
+ return $;
+ }
+};
+
+$root.armnnSerializer.L2NormalizationLayer = class L2NormalizationLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.L2NormalizationLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.L2NormalizationDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.L2NormalizationLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.L2NormalizationDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.L2NormalizationDescriptor = class L2NormalizationDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.L2NormalizationDescriptor();
+ $.dataLayout = reader.int8_(position, 4, 1);
+ $.eps = reader.float32_(position, 6, 1e-12);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.L2NormalizationDescriptor();
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ $.eps = reader.value(json.eps, 1e-12);
+ return $;
+ }
+};
+
+$root.armnnSerializer.MinimumLayer = class MinimumLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.MinimumLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.MinimumLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.MaximumLayer = class MaximumLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.MaximumLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.MaximumLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.MultiplicationLayer = class MultiplicationLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.MultiplicationLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.MultiplicationLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.Pooling2dLayer = class Pooling2dLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.Pooling2dLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.Pooling2dDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.Pooling2dLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.Pooling2dDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.PoolingAlgorithm = {
+ Max: 0,
+ Average: 1,
+ L2: 2
+};
+
+$root.armnnSerializer.OutputShapeRounding = {
+ Floor: 0,
+ Ceiling: 1
+};
+
+$root.armnnSerializer.PaddingMethod = {
+ IgnoreValue: 0,
+ Exclude: 1
+};
+
+$root.armnnSerializer.Pooling2dDescriptor = class Pooling2dDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.Pooling2dDescriptor();
+ $.poolType = reader.int8_(position, 4, 0);
+ $.padLeft = reader.uint32_(position, 6, 0);
+ $.padRight = reader.uint32_(position, 8, 0);
+ $.padTop = reader.uint32_(position, 10, 0);
+ $.padBottom = reader.uint32_(position, 12, 0);
+ $.poolWidth = reader.uint32_(position, 14, 0);
+ $.poolHeight = reader.uint32_(position, 16, 0);
+ $.strideX = reader.uint32_(position, 18, 0);
+ $.strideY = reader.uint32_(position, 20, 0);
+ $.outputShapeRounding = reader.int8_(position, 22, 0);
+ $.paddingMethod = reader.int8_(position, 24, 0);
+ $.dataLayout = reader.int8_(position, 26, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.Pooling2dDescriptor();
+ $.poolType = $root.armnnSerializer.PoolingAlgorithm[json.poolType];
+ $.padLeft = reader.value(json.padLeft, 0);
+ $.padRight = reader.value(json.padRight, 0);
+ $.padTop = reader.value(json.padTop, 0);
+ $.padBottom = reader.value(json.padBottom, 0);
+ $.poolWidth = reader.value(json.poolWidth, 0);
+ $.poolHeight = reader.value(json.poolHeight, 0);
+ $.strideX = reader.value(json.strideX, 0);
+ $.strideY = reader.value(json.strideY, 0);
+ $.outputShapeRounding = $root.armnnSerializer.OutputShapeRounding[json.outputShapeRounding];
+ $.paddingMethod = $root.armnnSerializer.PaddingMethod[json.paddingMethod];
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.QuantizeLayer = class QuantizeLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.QuantizeLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.QuantizeLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SoftmaxLayer = class SoftmaxLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SoftmaxLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.SoftmaxDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SoftmaxLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.SoftmaxDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SoftmaxDescriptor = class SoftmaxDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SoftmaxDescriptor();
+ $.beta = reader.float32_(position, 4, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SoftmaxDescriptor();
+ $.beta = reader.value(json.beta, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.DepthwiseConvolution2dLayer = class DepthwiseConvolution2dLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.DepthwiseConvolution2dLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.DepthwiseConvolution2dDescriptor.decode);
+ $.weights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ $.biases = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.DepthwiseConvolution2dLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.DepthwiseConvolution2dDescriptor.decodeText);
+ $.weights = reader.object(json.weights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.biases = reader.object(json.biases, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.DepthwiseConvolution2dDescriptor = class DepthwiseConvolution2dDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.DepthwiseConvolution2dDescriptor();
+ $.padLeft = reader.uint32_(position, 4, 0);
+ $.padRight = reader.uint32_(position, 6, 0);
+ $.padTop = reader.uint32_(position, 8, 0);
+ $.padBottom = reader.uint32_(position, 10, 0);
+ $.strideX = reader.uint32_(position, 12, 0);
+ $.strideY = reader.uint32_(position, 14, 0);
+ $.dilationX = reader.uint32_(position, 16, 1);
+ $.dilationY = reader.uint32_(position, 18, 1);
+ $.biasEnabled = reader.bool_(position, 20, false);
+ $.dataLayout = reader.int8_(position, 22, 1);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.DepthwiseConvolution2dDescriptor();
+ $.padLeft = reader.value(json.padLeft, 0);
+ $.padRight = reader.value(json.padRight, 0);
+ $.padTop = reader.value(json.padTop, 0);
+ $.padBottom = reader.value(json.padBottom, 0);
+ $.strideX = reader.value(json.strideX, 0);
+ $.strideY = reader.value(json.strideY, 0);
+ $.dilationX = reader.value(json.dilationX, 1);
+ $.dilationY = reader.value(json.dilationY, 1);
+ $.biasEnabled = reader.value(json.biasEnabled, false);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.OutputLayer = class OutputLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.OutputLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.BindableLayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.OutputLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.BindableLayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ReshapeLayer = class ReshapeLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ReshapeLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.ReshapeDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ReshapeLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ReshapeDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ReshapeDescriptor = class ReshapeDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ReshapeDescriptor();
+ $.targetShape = reader.typedArray(position, 4, Uint32Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ReshapeDescriptor();
+ $.targetShape = reader.typedArray(json.targetShape, Uint32Array);
+ return $;
+ }
+};
+
+$root.armnnSerializer.PermuteLayer = class PermuteLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.PermuteLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.PermuteDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.PermuteLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.PermuteDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.PermuteDescriptor = class PermuteDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.PermuteDescriptor();
+ $.dimMappings = reader.typedArray(position, 4, Uint32Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.PermuteDescriptor();
+ $.dimMappings = reader.typedArray(json.dimMappings, Uint32Array);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SpaceToBatchNdLayer = class SpaceToBatchNdLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SpaceToBatchNdLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.SpaceToBatchNdDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SpaceToBatchNdLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.SpaceToBatchNdDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SpaceToBatchNdDescriptor = class SpaceToBatchNdDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SpaceToBatchNdDescriptor();
+ $.blockShape = reader.typedArray(position, 4, Uint32Array);
+ $.padList = reader.typedArray(position, 6, Uint32Array);
+ $.dataLayout = reader.int8_(position, 8, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SpaceToBatchNdDescriptor();
+ $.blockShape = reader.typedArray(json.blockShape, Uint32Array);
+ $.padList = reader.typedArray(json.padList, Uint32Array);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.SpaceToDepthLayer = class SpaceToDepthLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SpaceToDepthLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.SpaceToDepthDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SpaceToDepthLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.SpaceToDepthDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SpaceToDepthDescriptor = class SpaceToDepthDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SpaceToDepthDescriptor();
+ $.blockSize = reader.uint32_(position, 4, 0);
+ $.dataLayout = reader.int8_(position, 6, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SpaceToDepthDescriptor();
+ $.blockSize = reader.value(json.blockSize, 0);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.SubtractionLayer = class SubtractionLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SubtractionLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SubtractionLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.BatchToSpaceNdLayer = class BatchToSpaceNdLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.BatchToSpaceNdLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.BatchToSpaceNdDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.BatchToSpaceNdLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.BatchToSpaceNdDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.BatchToSpaceNdDescriptor = class BatchToSpaceNdDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.BatchToSpaceNdDescriptor();
+ $.blockShape = reader.typedArray(position, 4, Uint32Array);
+ $.crops = reader.typedArray(position, 6, Uint32Array);
+ $.dataLayout = reader.int8_(position, 8, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.BatchToSpaceNdDescriptor();
+ $.blockShape = reader.typedArray(json.blockShape, Uint32Array);
+ $.crops = reader.typedArray(json.crops, Uint32Array);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
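+// Enum tables are plain name-to-value maps mirroring the schema; decodeText
+// uses them to turn JSON enum strings back into their numeric codes.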
+$root.armnnSerializer.NormalizationAlgorithmChannel = {
+ Across: 0,
+ Within: 1
+};
+
+$root.armnnSerializer.NormalizationAlgorithmMethod = {
+ LocalBrightness: 0,
+ LocalContrast: 1
+};
+
+$root.armnnSerializer.NormalizationLayer = class NormalizationLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.NormalizationLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.NormalizationDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.NormalizationLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.NormalizationDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.NormalizationDescriptor = class NormalizationDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.NormalizationDescriptor();
+ $.normChannelType = reader.int8_(position, 4, 0);
+ $.normMethodType = reader.int8_(position, 6, 0);
+ $.normSize = reader.uint32_(position, 8, 0);
+ $.alpha = reader.float32_(position, 10, 0);
+ $.beta = reader.float32_(position, 12, 0);
+ $.k = reader.float32_(position, 14, 0);
+ $.dataLayout = reader.int8_(position, 16, 1);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.NormalizationDescriptor();
+ $.normChannelType = $root.armnnSerializer.NormalizationAlgorithmChannel[json.normChannelType];
+ $.normMethodType = $root.armnnSerializer.NormalizationAlgorithmMethod[json.normMethodType];
+ $.normSize = reader.value(json.normSize, 0);
+ $.alpha = reader.value(json.alpha, 0);
+ $.beta = reader.value(json.beta, 0);
+ $.k = reader.value(json.k, 0);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.MeanLayer = class MeanLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.MeanLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.MeanDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.MeanLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.MeanDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.MeanDescriptor = class MeanDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.MeanDescriptor();
+ $.axis = reader.typedArray(position, 4, Uint32Array);
+ $.keepDims = reader.bool_(position, 6, false);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.MeanDescriptor();
+ $.axis = reader.typedArray(json.axis, Uint32Array);
+ $.keepDims = reader.value(json.keepDims, false);
+ return $;
+ }
+};
+
+$root.armnnSerializer.PadLayer = class PadLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.PadLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.PadDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.PadLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.PadDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.PadDescriptor = class PadDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.PadDescriptor();
+ $.padList = reader.typedArray(position, 4, Uint32Array);
+ $.padValue = reader.float32_(position, 6, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.PadDescriptor();
+ $.padList = reader.typedArray(json.padList, Uint32Array);
+ $.padValue = reader.value(json.padValue, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.RsqrtLayer = class RsqrtLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.RsqrtLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.RsqrtLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.BatchNormalizationLayer = class BatchNormalizationLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.BatchNormalizationLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.BatchNormalizationDescriptor.decode);
+ $.mean = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ $.variance = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode);
+ $.beta = reader.table(position, 12, $root.armnnSerializer.ConstTensor.decode);
+ $.gamma = reader.table(position, 14, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.BatchNormalizationLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.BatchNormalizationDescriptor.decodeText);
+ $.mean = reader.object(json.mean, $root.armnnSerializer.ConstTensor.decodeText);
+ $.variance = reader.object(json.variance, $root.armnnSerializer.ConstTensor.decodeText);
+ $.beta = reader.object(json.beta, $root.armnnSerializer.ConstTensor.decodeText);
+ $.gamma = reader.object(json.gamma, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.BatchNormalizationDescriptor = class BatchNormalizationDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.BatchNormalizationDescriptor();
+ $.eps = reader.float32_(position, 4, 0);
+ $.dataLayout = reader.int8_(position, 6, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.BatchNormalizationDescriptor();
+ $.eps = reader.value(json.eps, 0);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.ResizeBilinearLayer = class ResizeBilinearLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ResizeBilinearLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.ResizeBilinearDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ResizeBilinearLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ResizeBilinearDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ResizeBilinearDescriptor = class ResizeBilinearDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ResizeBilinearDescriptor();
+ $.targetWidth = reader.uint32_(position, 4, 0);
+ $.targetHeight = reader.uint32_(position, 6, 0);
+ $.dataLayout = reader.int8_(position, 8, 0);
+ $.alignCorners = reader.bool_(position, 10, false);
+ $.halfPixelCenters = reader.bool_(position, 12, false);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ResizeBilinearDescriptor();
+ $.targetWidth = reader.value(json.targetWidth, 0);
+ $.targetHeight = reader.value(json.targetHeight, 0);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ $.alignCorners = reader.value(json.alignCorners, false);
+ $.halfPixelCenters = reader.value(json.halfPixelCenters, false);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SliceLayer = class SliceLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SliceLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.SliceDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SliceLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.SliceDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SliceDescriptor = class SliceDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SliceDescriptor();
+ $.begin = reader.typedArray(position, 4, Uint32Array);
+ $.size = reader.typedArray(position, 6, Uint32Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SliceDescriptor();
+ $.begin = reader.typedArray(json.begin, Uint32Array);
+ $.size = reader.typedArray(json.size, Uint32Array);
+ return $;
+ }
+};
+
+$root.armnnSerializer.StridedSliceLayer = class StridedSliceLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.StridedSliceLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.StridedSliceDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.StridedSliceLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.StridedSliceDescriptor.decodeText);
+ return $;
+ }
+};
+
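+// The mask fields below (beginMask, endMask, shrinkAxisMask, ellipsisMask,
+// newAxisMask) are per-dimension bitfields, following the usual strided-slice
+// semantics where bit i applies the behavior to dimension i.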
+$root.armnnSerializer.StridedSliceDescriptor = class StridedSliceDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.StridedSliceDescriptor();
+ $.begin = reader.typedArray(position, 4, Int32Array);
+ $.end = reader.typedArray(position, 6, Int32Array);
+ $.stride = reader.typedArray(position, 8, Int32Array);
+ $.beginMask = reader.int32_(position, 10, 0);
+ $.endMask = reader.int32_(position, 12, 0);
+ $.shrinkAxisMask = reader.int32_(position, 14, 0);
+ $.ellipsisMask = reader.int32_(position, 16, 0);
+ $.newAxisMask = reader.int32_(position, 18, 0);
+ $.dataLayout = reader.int8_(position, 20, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.StridedSliceDescriptor();
+ $.begin = reader.typedArray(json.begin, Int32Array);
+ $.end = reader.typedArray(json.end, Int32Array);
+ $.stride = reader.typedArray(json.stride, Int32Array);
+ $.beginMask = reader.value(json.beginMask, 0);
+ $.endMask = reader.value(json.endMask, 0);
+ $.shrinkAxisMask = reader.value(json.shrinkAxisMask, 0);
+ $.ellipsisMask = reader.value(json.ellipsisMask, 0);
+ $.newAxisMask = reader.value(json.newAxisMask, 0);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.ConcatLayer = class ConcatLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ConcatLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.OriginsDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ConcatLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.OriginsDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.MergerLayer = class MergerLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.MergerLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.OriginsDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.MergerLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.OriginsDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.UintVector = class UintVector {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.UintVector();
+ $.data = reader.typedArray(position, 4, Uint32Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.UintVector();
+ $.data = reader.typedArray(json.data, Uint32Array);
+ return $;
+ }
+};
+
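+// OriginsDescriptor stores one UintVector per concatenated view, so the
+// binary path uses tableArray and the JSON path uses objectArray.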
+$root.armnnSerializer.OriginsDescriptor = class OriginsDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.OriginsDescriptor();
+ $.concatAxis = reader.uint32_(position, 4, 0);
+ $.numViews = reader.uint32_(position, 6, 0);
+ $.numDimensions = reader.uint32_(position, 8, 0);
+ $.viewOrigins = reader.tableArray(position, 10, $root.armnnSerializer.UintVector.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.OriginsDescriptor();
+ $.concatAxis = reader.value(json.concatAxis, 0);
+ $.numViews = reader.value(json.numViews, 0);
+ $.numDimensions = reader.value(json.numDimensions, 0);
+ $.viewOrigins = reader.objectArray(json.viewOrigins, $root.armnnSerializer.UintVector.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ViewsDescriptor = class ViewsDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ViewsDescriptor();
+ $.origins = reader.table(position, 4, $root.armnnSerializer.OriginsDescriptor.decode);
+ $.viewSizes = reader.tableArray(position, 6, $root.armnnSerializer.UintVector.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ViewsDescriptor();
+ $.origins = reader.object(json.origins, $root.armnnSerializer.OriginsDescriptor.decodeText);
+ $.viewSizes = reader.objectArray(json.viewSizes, $root.armnnSerializer.UintVector.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SplitterLayer = class SplitterLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SplitterLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.ViewsDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SplitterLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ViewsDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.DetectionPostProcessLayer = class DetectionPostProcessLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.DetectionPostProcessLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.DetectionPostProcessDescriptor.decode);
+ $.anchors = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.DetectionPostProcessLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.DetectionPostProcessDescriptor.decodeText);
+ $.anchors = reader.object(json.anchors, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.DetectionPostProcessDescriptor = class DetectionPostProcessDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.DetectionPostProcessDescriptor();
+ $.maxDetections = reader.uint32_(position, 4, 0);
+ $.maxClassesPerDetection = reader.uint32_(position, 6, 0);
+ $.detectionsPerClass = reader.uint32_(position, 8, 0);
+ $.nmsScoreThreshold = reader.float32_(position, 10, 0);
+ $.nmsIouThreshold = reader.float32_(position, 12, 0);
+ $.numClasses = reader.uint32_(position, 14, 0);
+ $.useRegularNms = reader.bool_(position, 16, false);
+ $.scaleX = reader.float32_(position, 18, 0);
+ $.scaleY = reader.float32_(position, 20, 0);
+ $.scaleW = reader.float32_(position, 22, 0);
+ $.scaleH = reader.float32_(position, 24, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.DetectionPostProcessDescriptor();
+ $.maxDetections = reader.value(json.maxDetections, 0);
+ $.maxClassesPerDetection = reader.value(json.maxClassesPerDetection, 0);
+ $.detectionsPerClass = reader.value(json.detectionsPerClass, 0);
+ $.nmsScoreThreshold = reader.value(json.nmsScoreThreshold, 0);
+ $.nmsIouThreshold = reader.value(json.nmsIouThreshold, 0);
+ $.numClasses = reader.value(json.numClasses, 0);
+ $.useRegularNms = reader.value(json.useRegularNms, false);
+ $.scaleX = reader.value(json.scaleX, 0);
+ $.scaleY = reader.value(json.scaleY, 0);
+ $.scaleW = reader.value(json.scaleW, 0);
+ $.scaleH = reader.value(json.scaleH, 0);
+ return $;
+ }
+};
+
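+// LstmInputParams is a flat list of optional ConstTensor fields; which ones
+// are populated depends on the descriptor flags (CIFG, peephole, projection,
+// layer normalization).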
+$root.armnnSerializer.LstmInputParams = class LstmInputParams {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.LstmInputParams();
+ $.inputToForgetWeights = reader.table(position, 4, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToCellWeights = reader.table(position, 6, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToOutputWeights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToForgetWeights = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToCellWeights = reader.table(position, 12, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToOutputWeights = reader.table(position, 14, $root.armnnSerializer.ConstTensor.decode);
+ $.forgetGateBias = reader.table(position, 16, $root.armnnSerializer.ConstTensor.decode);
+ $.cellBias = reader.table(position, 18, $root.armnnSerializer.ConstTensor.decode);
+ $.outputGateBias = reader.table(position, 20, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToInputWeights = reader.table(position, 22, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToInputWeights = reader.table(position, 24, $root.armnnSerializer.ConstTensor.decode);
+ $.cellToInputWeights = reader.table(position, 26, $root.armnnSerializer.ConstTensor.decode);
+ $.inputGateBias = reader.table(position, 28, $root.armnnSerializer.ConstTensor.decode);
+ $.projectionWeights = reader.table(position, 30, $root.armnnSerializer.ConstTensor.decode);
+ $.projectionBias = reader.table(position, 32, $root.armnnSerializer.ConstTensor.decode);
+ $.cellToForgetWeights = reader.table(position, 34, $root.armnnSerializer.ConstTensor.decode);
+ $.cellToOutputWeights = reader.table(position, 36, $root.armnnSerializer.ConstTensor.decode);
+ $.inputLayerNormWeights = reader.table(position, 38, $root.armnnSerializer.ConstTensor.decode);
+ $.forgetLayerNormWeights = reader.table(position, 40, $root.armnnSerializer.ConstTensor.decode);
+ $.cellLayerNormWeights = reader.table(position, 42, $root.armnnSerializer.ConstTensor.decode);
+ $.outputLayerNormWeights = reader.table(position, 44, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.LstmInputParams();
+ $.inputToForgetWeights = reader.object(json.inputToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToCellWeights = reader.object(json.inputToCellWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToOutputWeights = reader.object(json.inputToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToForgetWeights = reader.object(json.recurrentToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToCellWeights = reader.object(json.recurrentToCellWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToOutputWeights = reader.object(json.recurrentToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.forgetGateBias = reader.object(json.forgetGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellBias = reader.object(json.cellBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.outputGateBias = reader.object(json.outputGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToInputWeights = reader.object(json.inputToInputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToInputWeights = reader.object(json.recurrentToInputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellToInputWeights = reader.object(json.cellToInputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputGateBias = reader.object(json.inputGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.projectionWeights = reader.object(json.projectionWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.projectionBias = reader.object(json.projectionBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellToForgetWeights = reader.object(json.cellToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellToOutputWeights = reader.object(json.cellToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputLayerNormWeights = reader.object(json.inputLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.forgetLayerNormWeights = reader.object(json.forgetLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellLayerNormWeights = reader.object(json.cellLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.outputLayerNormWeights = reader.object(json.outputLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
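+// Note the schema defaults below: cifgEnabled defaults to true, while the
+// other feature flags default to false.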
+$root.armnnSerializer.LstmDescriptor = class LstmDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.LstmDescriptor();
+ $.activationFunc = reader.uint32_(position, 4, 0);
+ $.clippingThresCell = reader.float32_(position, 6, 0);
+ $.clippingThresProj = reader.float32_(position, 8, 0);
+ $.cifgEnabled = reader.bool_(position, 10, true);
+ $.peepholeEnabled = reader.bool_(position, 12, false);
+ $.projectionEnabled = reader.bool_(position, 14, false);
+ $.layerNormEnabled = reader.bool_(position, 16, false);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.LstmDescriptor();
+ $.activationFunc = reader.value(json.activationFunc, 0);
+ $.clippingThresCell = reader.value(json.clippingThresCell, 0);
+ $.clippingThresProj = reader.value(json.clippingThresProj, 0);
+ $.cifgEnabled = reader.value(json.cifgEnabled, true);
+ $.peepholeEnabled = reader.value(json.peepholeEnabled, false);
+ $.projectionEnabled = reader.value(json.projectionEnabled, false);
+ $.layerNormEnabled = reader.value(json.layerNormEnabled, false);
+ return $;
+ }
+};
+
+$root.armnnSerializer.LstmLayer = class LstmLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.LstmLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.LstmDescriptor.decode);
+ $.inputParams = reader.table(position, 8, $root.armnnSerializer.LstmInputParams.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.LstmLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.LstmDescriptor.decodeText);
+ $.inputParams = reader.object(json.inputParams, $root.armnnSerializer.LstmInputParams.decodeText);
+ return $;
+ }
+};
+
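+// QLstm mirrors the float LSTM tables but drops cellToInputWeights ordering
+// differences aside, adds per-gate intermediate scales and a hidden-state
+// zero point/scale in its descriptor (see QLstmDescriptor below).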
+$root.armnnSerializer.QLstmInputParams = class QLstmInputParams {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.QLstmInputParams();
+ $.inputToForgetWeights = reader.table(position, 4, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToCellWeights = reader.table(position, 6, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToOutputWeights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToForgetWeights = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToCellWeights = reader.table(position, 12, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToOutputWeights = reader.table(position, 14, $root.armnnSerializer.ConstTensor.decode);
+ $.forgetGateBias = reader.table(position, 16, $root.armnnSerializer.ConstTensor.decode);
+ $.cellBias = reader.table(position, 18, $root.armnnSerializer.ConstTensor.decode);
+ $.outputGateBias = reader.table(position, 20, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToInputWeights = reader.table(position, 22, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToInputWeights = reader.table(position, 24, $root.armnnSerializer.ConstTensor.decode);
+ $.inputGateBias = reader.table(position, 26, $root.armnnSerializer.ConstTensor.decode);
+ $.projectionWeights = reader.table(position, 28, $root.armnnSerializer.ConstTensor.decode);
+ $.projectionBias = reader.table(position, 30, $root.armnnSerializer.ConstTensor.decode);
+ $.cellToInputWeights = reader.table(position, 32, $root.armnnSerializer.ConstTensor.decode);
+ $.cellToForgetWeights = reader.table(position, 34, $root.armnnSerializer.ConstTensor.decode);
+ $.cellToOutputWeights = reader.table(position, 36, $root.armnnSerializer.ConstTensor.decode);
+ $.inputLayerNormWeights = reader.table(position, 38, $root.armnnSerializer.ConstTensor.decode);
+ $.forgetLayerNormWeights = reader.table(position, 40, $root.armnnSerializer.ConstTensor.decode);
+ $.cellLayerNormWeights = reader.table(position, 42, $root.armnnSerializer.ConstTensor.decode);
+ $.outputLayerNormWeights = reader.table(position, 44, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.QLstmInputParams();
+ $.inputToForgetWeights = reader.object(json.inputToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToCellWeights = reader.object(json.inputToCellWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToOutputWeights = reader.object(json.inputToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToForgetWeights = reader.object(json.recurrentToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToCellWeights = reader.object(json.recurrentToCellWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToOutputWeights = reader.object(json.recurrentToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.forgetGateBias = reader.object(json.forgetGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellBias = reader.object(json.cellBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.outputGateBias = reader.object(json.outputGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToInputWeights = reader.object(json.inputToInputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToInputWeights = reader.object(json.recurrentToInputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputGateBias = reader.object(json.inputGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.projectionWeights = reader.object(json.projectionWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.projectionBias = reader.object(json.projectionBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellToInputWeights = reader.object(json.cellToInputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellToForgetWeights = reader.object(json.cellToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellToOutputWeights = reader.object(json.cellToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputLayerNormWeights = reader.object(json.inputLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.forgetLayerNormWeights = reader.object(json.forgetLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellLayerNormWeights = reader.object(json.cellLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.outputLayerNormWeights = reader.object(json.outputLayerNormWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.QLstmDescriptor = class QLstmDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.QLstmDescriptor();
+ $.cifgEnabled = reader.bool_(position, 4, true);
+ $.peepholeEnabled = reader.bool_(position, 6, false);
+ $.projectionEnabled = reader.bool_(position, 8, false);
+ $.layerNormEnabled = reader.bool_(position, 10, false);
+ $.cellClip = reader.float32_(position, 12, 0);
+ $.projectionClip = reader.float32_(position, 14, 0);
+ $.inputIntermediateScale = reader.float32_(position, 16, 0);
+ $.forgetIntermediateScale = reader.float32_(position, 18, 0);
+ $.cellIntermediateScale = reader.float32_(position, 20, 0);
+ $.outputIntermediateScale = reader.float32_(position, 22, 0);
+ $.hiddenStateZeroPoint = reader.int32_(position, 24, 0);
+ $.hiddenStateScale = reader.float32_(position, 26, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.QLstmDescriptor();
+ $.cifgEnabled = reader.value(json.cifgEnabled, true);
+ $.peepholeEnabled = reader.value(json.peepholeEnabled, false);
+ $.projectionEnabled = reader.value(json.projectionEnabled, false);
+ $.layerNormEnabled = reader.value(json.layerNormEnabled, false);
+ $.cellClip = reader.value(json.cellClip, 0);
+ $.projectionClip = reader.value(json.projectionClip, 0);
+ $.inputIntermediateScale = reader.value(json.inputIntermediateScale, 0);
+ $.forgetIntermediateScale = reader.value(json.forgetIntermediateScale, 0);
+ $.cellIntermediateScale = reader.value(json.cellIntermediateScale, 0);
+ $.outputIntermediateScale = reader.value(json.outputIntermediateScale, 0);
+ $.hiddenStateZeroPoint = reader.value(json.hiddenStateZeroPoint, 0);
+ $.hiddenStateScale = reader.value(json.hiddenStateScale, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.QLstmLayer = class QLstmLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.QLstmLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.QLstmDescriptor.decode);
+ $.inputParams = reader.table(position, 8, $root.armnnSerializer.QLstmInputParams.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.QLstmLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.QLstmDescriptor.decodeText);
+ $.inputParams = reader.object(json.inputParams, $root.armnnSerializer.QLstmInputParams.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.QuantizedLstmInputParams = class QuantizedLstmInputParams {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.QuantizedLstmInputParams();
+ $.inputToInputWeights = reader.table(position, 4, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToForgetWeights = reader.table(position, 6, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToCellWeights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ $.inputToOutputWeights = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToInputWeights = reader.table(position, 12, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToForgetWeights = reader.table(position, 14, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToCellWeights = reader.table(position, 16, $root.armnnSerializer.ConstTensor.decode);
+ $.recurrentToOutputWeights = reader.table(position, 18, $root.armnnSerializer.ConstTensor.decode);
+ $.inputGateBias = reader.table(position, 20, $root.armnnSerializer.ConstTensor.decode);
+ $.forgetGateBias = reader.table(position, 22, $root.armnnSerializer.ConstTensor.decode);
+ $.cellBias = reader.table(position, 24, $root.armnnSerializer.ConstTensor.decode);
+ $.outputGateBias = reader.table(position, 26, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.QuantizedLstmInputParams();
+ $.inputToInputWeights = reader.object(json.inputToInputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToForgetWeights = reader.object(json.inputToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToCellWeights = reader.object(json.inputToCellWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputToOutputWeights = reader.object(json.inputToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToInputWeights = reader.object(json.recurrentToInputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToForgetWeights = reader.object(json.recurrentToForgetWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToCellWeights = reader.object(json.recurrentToCellWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.recurrentToOutputWeights = reader.object(json.recurrentToOutputWeights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.inputGateBias = reader.object(json.inputGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.forgetGateBias = reader.object(json.forgetGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.cellBias = reader.object(json.cellBias, $root.armnnSerializer.ConstTensor.decodeText);
+ $.outputGateBias = reader.object(json.outputGateBias, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.QuantizedLstmLayer = class QuantizedLstmLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.QuantizedLstmLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.inputParams = reader.table(position, 6, $root.armnnSerializer.QuantizedLstmInputParams.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.QuantizedLstmLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.inputParams = reader.object(json.inputParams, $root.armnnSerializer.QuantizedLstmInputParams.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.DequantizeLayer = class DequantizeLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.DequantizeLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.DequantizeLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.MergeLayer = class MergeLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.MergeLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.MergeLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SwitchLayer = class SwitchLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SwitchLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SwitchLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.PreluLayer = class PreluLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.PreluLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.PreluLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.TransposeConvolution2dLayer = class TransposeConvolution2dLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.TransposeConvolution2dLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.TransposeConvolution2dDescriptor.decode);
+ $.weights = reader.table(position, 8, $root.armnnSerializer.ConstTensor.decode);
+ $.biases = reader.table(position, 10, $root.armnnSerializer.ConstTensor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.TransposeConvolution2dLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.TransposeConvolution2dDescriptor.decodeText);
+ $.weights = reader.object(json.weights, $root.armnnSerializer.ConstTensor.decodeText);
+ $.biases = reader.object(json.biases, $root.armnnSerializer.ConstTensor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.TransposeConvolution2dDescriptor = class TransposeConvolution2dDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.TransposeConvolution2dDescriptor();
+ $.padLeft = reader.uint32_(position, 4, 0);
+ $.padRight = reader.uint32_(position, 6, 0);
+ $.padTop = reader.uint32_(position, 8, 0);
+ $.padBottom = reader.uint32_(position, 10, 0);
+ $.strideX = reader.uint32_(position, 12, 0);
+ $.strideY = reader.uint32_(position, 14, 0);
+ $.biasEnabled = reader.bool_(position, 16, false);
+ $.dataLayout = reader.int8_(position, 18, 1);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.TransposeConvolution2dDescriptor();
+ $.padLeft = reader.value(json.padLeft, 0);
+ $.padRight = reader.value(json.padRight, 0);
+ $.padTop = reader.value(json.padTop, 0);
+ $.padBottom = reader.value(json.padBottom, 0);
+ $.strideX = reader.value(json.strideX, 0);
+ $.strideY = reader.value(json.strideY, 0);
+ $.biasEnabled = reader.value(json.biasEnabled, false);
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ return $;
+ }
+};
+
+$root.armnnSerializer.TransposeLayer = class TransposeLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.TransposeLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.TransposeDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.TransposeLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.TransposeDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.TransposeDescriptor = class TransposeDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.TransposeDescriptor();
+ $.dimMappings = reader.typedArray(position, 4, Uint32Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.TransposeDescriptor();
+ $.dimMappings = reader.typedArray(json.dimMappings, Uint32Array);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ResizeLayer = class ResizeLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ResizeLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.ResizeDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ResizeLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.ResizeDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.ResizeDescriptor = class ResizeDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.ResizeDescriptor();
+ $.targetHeight = reader.uint32_(position, 4, 0);
+ $.targetWidth = reader.uint32_(position, 6, 0);
+ $.method = reader.int8_(position, 8, 0);
+ $.dataLayout = reader.int8_(position, 10, 0);
+ $.alignCorners = reader.bool_(position, 12, false);
+ $.halfPixelCenters = reader.bool_(position, 14, false);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.ResizeDescriptor();
+ $.targetHeight = reader.value(json.targetHeight, 0);
+ $.targetWidth = reader.value(json.targetWidth, 0);
+ $.method = $root.armnnSerializer.ResizeMethod[json.method];
+ $.dataLayout = $root.armnnSerializer.DataLayout[json.dataLayout];
+ $.alignCorners = reader.value(json.alignCorners, false);
+ $.halfPixelCenters = reader.value(json.halfPixelCenters, false);
+ return $;
+ }
+};
+
+$root.armnnSerializer.StackLayer = class StackLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.StackLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.StackDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.StackLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.StackDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.StackDescriptor = class StackDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.StackDescriptor();
+ $.axis = reader.uint32_(position, 4, 0);
+ $.numInputs = reader.uint32_(position, 6, 0);
+ $.inputShape = reader.typedArray(position, 8, Uint32Array);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.StackDescriptor();
+ $.axis = reader.value(json.axis, 0);
+ $.numInputs = reader.value(json.numInputs, 0);
+ $.inputShape = reader.typedArray(json.inputShape, Uint32Array);
+ return $;
+ }
+};
+
+$root.armnnSerializer.StandInDescriptor = class StandInDescriptor {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.StandInDescriptor();
+ $.numInputs = reader.uint32_(position, 4, 0);
+ $.numOutputs = reader.uint32_(position, 6, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.StandInDescriptor();
+ $.numInputs = reader.value(json.numInputs, 0);
+ $.numOutputs = reader.value(json.numOutputs, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.StandInLayer = class StandInLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.StandInLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ $.descriptor = reader.table(position, 6, $root.armnnSerializer.StandInDescriptor.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.StandInLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ $.descriptor = reader.object(json.descriptor, $root.armnnSerializer.StandInDescriptor.decodeText);
+ return $;
+ }
+};
+
+$root.armnnSerializer.RankLayer = class RankLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.RankLayer();
+ $.base = reader.table(position, 4, $root.armnnSerializer.LayerBase.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.RankLayer();
+ $.base = reader.object(json.base, $root.armnnSerializer.LayerBase.decodeText);
+ return $;
+ }
+};
+
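+// Layer is the schema's union type: binary data carries a numeric type tag,
+// JSON carries the type name, and both dispatch to the matching class.
+// Illustration (hypothetical call, assuming a positioned reader):
+//   $root.armnnSerializer.Layer.decode(reader, position, 15)
+//   // -> SoftmaxLayer; unknown tags fall through to undefined.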
+$root.armnnSerializer.Layer = class Layer {
+
+ static decode(reader, position, type) {
+ switch (type) {
+ case 1: return $root.armnnSerializer.ActivationLayer.decode(reader, position);
+ case 2: return $root.armnnSerializer.AdditionLayer.decode(reader, position);
+ case 3: return $root.armnnSerializer.BatchToSpaceNdLayer.decode(reader, position);
+ case 4: return $root.armnnSerializer.BatchNormalizationLayer.decode(reader, position);
+ case 5: return $root.armnnSerializer.ConstantLayer.decode(reader, position);
+ case 6: return $root.armnnSerializer.Convolution2dLayer.decode(reader, position);
+ case 7: return $root.armnnSerializer.DepthwiseConvolution2dLayer.decode(reader, position);
+ case 8: return $root.armnnSerializer.FullyConnectedLayer.decode(reader, position);
+ case 9: return $root.armnnSerializer.InputLayer.decode(reader, position);
+ case 10: return $root.armnnSerializer.MultiplicationLayer.decode(reader, position);
+ case 11: return $root.armnnSerializer.OutputLayer.decode(reader, position);
+ case 12: return $root.armnnSerializer.PermuteLayer.decode(reader, position);
+ case 13: return $root.armnnSerializer.Pooling2dLayer.decode(reader, position);
+ case 14: return $root.armnnSerializer.ReshapeLayer.decode(reader, position);
+ case 15: return $root.armnnSerializer.SoftmaxLayer.decode(reader, position);
+ case 16: return $root.armnnSerializer.SpaceToBatchNdLayer.decode(reader, position);
+ case 17: return $root.armnnSerializer.DivisionLayer.decode(reader, position);
+ case 18: return $root.armnnSerializer.MinimumLayer.decode(reader, position);
+ case 19: return $root.armnnSerializer.EqualLayer.decode(reader, position);
+ case 20: return $root.armnnSerializer.MaximumLayer.decode(reader, position);
+ case 21: return $root.armnnSerializer.NormalizationLayer.decode(reader, position);
+ case 22: return $root.armnnSerializer.PadLayer.decode(reader, position);
+ case 23: return $root.armnnSerializer.RsqrtLayer.decode(reader, position);
+ case 24: return $root.armnnSerializer.FloorLayer.decode(reader, position);
+ case 25: return $root.armnnSerializer.GreaterLayer.decode(reader, position);
+ case 26: return $root.armnnSerializer.ResizeBilinearLayer.decode(reader, position);
+ case 27: return $root.armnnSerializer.SubtractionLayer.decode(reader, position);
+ case 28: return $root.armnnSerializer.StridedSliceLayer.decode(reader, position);
+ case 29: return $root.armnnSerializer.GatherLayer.decode(reader, position);
+ case 30: return $root.armnnSerializer.MeanLayer.decode(reader, position);
+ case 31: return $root.armnnSerializer.MergerLayer.decode(reader, position);
+ case 32: return $root.armnnSerializer.L2NormalizationLayer.decode(reader, position);
+ case 33: return $root.armnnSerializer.SplitterLayer.decode(reader, position);
+ case 34: return $root.armnnSerializer.DetectionPostProcessLayer.decode(reader, position);
+ case 35: return $root.armnnSerializer.LstmLayer.decode(reader, position);
+ case 36: return $root.armnnSerializer.QuantizedLstmLayer.decode(reader, position);
+ case 37: return $root.armnnSerializer.QuantizeLayer.decode(reader, position);
+ case 38: return $root.armnnSerializer.DequantizeLayer.decode(reader, position);
+ case 39: return $root.armnnSerializer.MergeLayer.decode(reader, position);
+ case 40: return $root.armnnSerializer.SwitchLayer.decode(reader, position);
+ case 41: return $root.armnnSerializer.ConcatLayer.decode(reader, position);
+ case 42: return $root.armnnSerializer.SpaceToDepthLayer.decode(reader, position);
+ case 43: return $root.armnnSerializer.PreluLayer.decode(reader, position);
+ case 44: return $root.armnnSerializer.TransposeConvolution2dLayer.decode(reader, position);
+ case 45: return $root.armnnSerializer.ResizeLayer.decode(reader, position);
+ case 46: return $root.armnnSerializer.StackLayer.decode(reader, position);
+ case 47: return $root.armnnSerializer.AbsLayer.decode(reader, position);
+ case 48: return $root.armnnSerializer.ArgMinMaxLayer.decode(reader, position);
+ case 49: return $root.armnnSerializer.SliceLayer.decode(reader, position);
+ case 50: return $root.armnnSerializer.DepthToSpaceLayer.decode(reader, position);
+ case 51: return $root.armnnSerializer.InstanceNormalizationLayer.decode(reader, position);
+ case 52: return $root.armnnSerializer.LogSoftmaxLayer.decode(reader, position);
+ case 53: return $root.armnnSerializer.ComparisonLayer.decode(reader, position);
+ case 54: return $root.armnnSerializer.StandInLayer.decode(reader, position);
+ case 55: return $root.armnnSerializer.ElementwiseUnaryLayer.decode(reader, position);
+ case 56: return $root.armnnSerializer.TransposeLayer.decode(reader, position);
+ case 57: return $root.armnnSerializer.QLstmLayer.decode(reader, position);
+ case 58: return $root.armnnSerializer.FillLayer.decode(reader, position);
+ case 59: return $root.armnnSerializer.RankLayer.decode(reader, position);
+ default: return undefined;
+ }
+ }
+
+ static decodeText(reader, json, type) {
+ switch (type) {
+ case 'ActivationLayer': return $root.armnnSerializer.ActivationLayer.decodeText(reader, json);
+ case 'AdditionLayer': return $root.armnnSerializer.AdditionLayer.decodeText(reader, json);
+ case 'BatchToSpaceNdLayer': return $root.armnnSerializer.BatchToSpaceNdLayer.decodeText(reader, json);
+ case 'BatchNormalizationLayer': return $root.armnnSerializer.BatchNormalizationLayer.decodeText(reader, json);
+ case 'ConstantLayer': return $root.armnnSerializer.ConstantLayer.decodeText(reader, json);
+ case 'Convolution2dLayer': return $root.armnnSerializer.Convolution2dLayer.decodeText(reader, json);
+ case 'DepthwiseConvolution2dLayer': return $root.armnnSerializer.DepthwiseConvolution2dLayer.decodeText(reader, json);
+ case 'FullyConnectedLayer': return $root.armnnSerializer.FullyConnectedLayer.decodeText(reader, json);
+ case 'InputLayer': return $root.armnnSerializer.InputLayer.decodeText(reader, json);
+ case 'MultiplicationLayer': return $root.armnnSerializer.MultiplicationLayer.decodeText(reader, json);
+ case 'OutputLayer': return $root.armnnSerializer.OutputLayer.decodeText(reader, json);
+ case 'PermuteLayer': return $root.armnnSerializer.PermuteLayer.decodeText(reader, json);
+ case 'Pooling2dLayer': return $root.armnnSerializer.Pooling2dLayer.decodeText(reader, json);
+ case 'ReshapeLayer': return $root.armnnSerializer.ReshapeLayer.decodeText(reader, json);
+ case 'SoftmaxLayer': return $root.armnnSerializer.SoftmaxLayer.decodeText(reader, json);
+ case 'SpaceToBatchNdLayer': return $root.armnnSerializer.SpaceToBatchNdLayer.decodeText(reader, json);
+ case 'DivisionLayer': return $root.armnnSerializer.DivisionLayer.decodeText(reader, json);
+ case 'MinimumLayer': return $root.armnnSerializer.MinimumLayer.decodeText(reader, json);
+ case 'EqualLayer': return $root.armnnSerializer.EqualLayer.decodeText(reader, json);
+ case 'MaximumLayer': return $root.armnnSerializer.MaximumLayer.decodeText(reader, json);
+ case 'NormalizationLayer': return $root.armnnSerializer.NormalizationLayer.decodeText(reader, json);
+ case 'PadLayer': return $root.armnnSerializer.PadLayer.decodeText(reader, json);
+ case 'RsqrtLayer': return $root.armnnSerializer.RsqrtLayer.decodeText(reader, json);
+ case 'FloorLayer': return $root.armnnSerializer.FloorLayer.decodeText(reader, json);
+ case 'GreaterLayer': return $root.armnnSerializer.GreaterLayer.decodeText(reader, json);
+ case 'ResizeBilinearLayer': return $root.armnnSerializer.ResizeBilinearLayer.decodeText(reader, json);
+ case 'SubtractionLayer': return $root.armnnSerializer.SubtractionLayer.decodeText(reader, json);
+ case 'StridedSliceLayer': return $root.armnnSerializer.StridedSliceLayer.decodeText(reader, json);
+ case 'GatherLayer': return $root.armnnSerializer.GatherLayer.decodeText(reader, json);
+ case 'MeanLayer': return $root.armnnSerializer.MeanLayer.decodeText(reader, json);
+ case 'MergerLayer': return $root.armnnSerializer.MergerLayer.decodeText(reader, json);
+ case 'L2NormalizationLayer': return $root.armnnSerializer.L2NormalizationLayer.decodeText(reader, json);
+ case 'SplitterLayer': return $root.armnnSerializer.SplitterLayer.decodeText(reader, json);
+ case 'DetectionPostProcessLayer': return $root.armnnSerializer.DetectionPostProcessLayer.decodeText(reader, json);
+ case 'LstmLayer': return $root.armnnSerializer.LstmLayer.decodeText(reader, json);
+ case 'QuantizedLstmLayer': return $root.armnnSerializer.QuantizedLstmLayer.decodeText(reader, json);
+ case 'QuantizeLayer': return $root.armnnSerializer.QuantizeLayer.decodeText(reader, json);
+ case 'DequantizeLayer': return $root.armnnSerializer.DequantizeLayer.decodeText(reader, json);
+ case 'MergeLayer': return $root.armnnSerializer.MergeLayer.decodeText(reader, json);
+ case 'SwitchLayer': return $root.armnnSerializer.SwitchLayer.decodeText(reader, json);
+ case 'ConcatLayer': return $root.armnnSerializer.ConcatLayer.decodeText(reader, json);
+ case 'SpaceToDepthLayer': return $root.armnnSerializer.SpaceToDepthLayer.decodeText(reader, json);
+ case 'PreluLayer': return $root.armnnSerializer.PreluLayer.decodeText(reader, json);
+ case 'TransposeConvolution2dLayer': return $root.armnnSerializer.TransposeConvolution2dLayer.decodeText(reader, json);
+ case 'ResizeLayer': return $root.armnnSerializer.ResizeLayer.decodeText(reader, json);
+ case 'StackLayer': return $root.armnnSerializer.StackLayer.decodeText(reader, json);
+ case 'AbsLayer': return $root.armnnSerializer.AbsLayer.decodeText(reader, json);
+ case 'ArgMinMaxLayer': return $root.armnnSerializer.ArgMinMaxLayer.decodeText(reader, json);
+ case 'SliceLayer': return $root.armnnSerializer.SliceLayer.decodeText(reader, json);
+ case 'DepthToSpaceLayer': return $root.armnnSerializer.DepthToSpaceLayer.decodeText(reader, json);
+ case 'InstanceNormalizationLayer': return $root.armnnSerializer.InstanceNormalizationLayer.decodeText(reader, json);
+ case 'LogSoftmaxLayer': return $root.armnnSerializer.LogSoftmaxLayer.decodeText(reader, json);
+ case 'ComparisonLayer': return $root.armnnSerializer.ComparisonLayer.decodeText(reader, json);
+ case 'StandInLayer': return $root.armnnSerializer.StandInLayer.decodeText(reader, json);
+ case 'ElementwiseUnaryLayer': return $root.armnnSerializer.ElementwiseUnaryLayer.decodeText(reader, json);
+ case 'TransposeLayer': return $root.armnnSerializer.TransposeLayer.decodeText(reader, json);
+ case 'QLstmLayer': return $root.armnnSerializer.QLstmLayer.decodeText(reader, json);
+ case 'FillLayer': return $root.armnnSerializer.FillLayer.decodeText(reader, json);
+ case 'RankLayer': return $root.armnnSerializer.RankLayer.decodeText(reader, json);
+ default: return undefined;
+ }
+ }
+};
+
+$root.armnnSerializer.AnyLayer = class AnyLayer {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.AnyLayer();
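+ // 'layer' is a FlatBuffers union; the stored type tag selects which concrete layer table Layer.decode instantiates.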
+ $.layer = reader.union(position, 4, $root.armnnSerializer.Layer.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.AnyLayer();
+ $.layer = $root.armnnSerializer.Layer.decodeText(reader, json.layer, json.layer_type);
+ return $;
+ }
+};
+
+$root.armnnSerializer.FeatureCompatibilityVersions = class FeatureCompatibilityVersions {
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.FeatureCompatibilityVersions();
+ $.bindingIdsScheme = reader.uint32_(position, 4, 0);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.FeatureCompatibilityVersions();
+ $.bindingIdsScheme = reader.value(json.bindingIdsScheme, 0);
+ return $;
+ }
+};
+
+$root.armnnSerializer.SerializedGraph = class SerializedGraph {
+
+ static identifier(reader) {
+ return reader.identifier === 'ARMN';
+ }
+
+ static create(reader) {
+ return $root.armnnSerializer.SerializedGraph.decode(reader, reader.root);
+ }
+
+ static createText(reader) {
+ return $root.armnnSerializer.SerializedGraph.decodeText(reader, reader.root);
+ }
+
+ static decode(reader, position) {
+ const $ = new $root.armnnSerializer.SerializedGraph();
+ $.layers = reader.tableArray(position, 4, $root.armnnSerializer.AnyLayer.decode);
+ $.inputIds = reader.typedArray(position, 6, Int32Array);
+ $.outputIds = reader.typedArray(position, 8, Int32Array);
+ $.featureVersions = reader.table(position, 10, $root.armnnSerializer.FeatureCompatibilityVersions.decode);
+ return $;
+ }
+
+ static decodeText(reader, json) {
+ const $ = new $root.armnnSerializer.SerializedGraph();
+ $.layers = reader.objectArray(json.layers, $root.armnnSerializer.AnyLayer.decodeText);
+ $.inputIds = reader.typedArray(json.inputIds, Int32Array);
+ $.outputIds = reader.typedArray(json.outputIds, Int32Array);
+ $.featureVersions = reader.object(json.featureVersions, $root.armnnSerializer.FeatureCompatibilityVersions.decodeText);
+ return $;
+ }
+};
diff --git a/armnn.js b/armnn.js
new file mode 100644
index 00000000000..e4da7025000
--- /dev/null
+++ b/armnn.js
@@ -0,0 +1,320 @@
+
+import * as flatbuffers from './flatbuffers.js';
+
+const armnn = {};
+
+armnn.ModelFactory = class {
+
+ match(context) {
+ const identifier = context.identifier;
+ const extension = identifier.split('.').pop().toLowerCase();
+ const stream = context.stream;
+ if (stream && extension === 'armnn') {
+ return { name: 'armnn.flatbuffers', value: stream };
+ }
+ if (extension === 'json') {
+ const obj = context.peek('json');
+ if (obj && obj.layers && obj.inputIds && obj.outputIds) {
+ return { name: 'armnn.flatbuffers.json', value: obj };
+ }
+ }
+ return undefined;
+ }
+
+ async open(context, target) {
+ await context.require('./armnn-schema');
+ armnn.schema = flatbuffers.get('armnn').armnnSerializer;
+ let model = null;
+ switch (target.name) {
+ case 'armnn.flatbuffers': {
+ try {
+ const stream = target.value;
+ const reader = flatbuffers.BinaryReader.open(stream);
+ model = armnn.schema.SerializedGraph.create(reader);
+ } catch (error) {
+ const message = error && error.message ? error.message : error.toString();
+ throw new armnn.Error(`File format is not armnn.SerializedGraph (${message.replace(/\.$/, '')}).`);
+ }
+ break;
+ }
+ case 'armnn.flatbuffers.json': {
+ try {
+ const obj = target.value;
+ const reader = flatbuffers.TextReader.open(obj);
+ model = armnn.schema.SerializedGraph.createText(reader);
+ } catch (error) {
+ const message = error && error.message ? error.message : error.toString();
+ throw new armnn.Error(`File text format is not armnn.SerializedGraph (${message.replace(/\.$/, '')}).`);
+ }
+ break;
+ }
+ default: {
+ throw new armnn.Error(`Unsupported Arm NN format '${target.name}'.`);
+ }
+ }
+ const metadata = await context.metadata('armnn-metadata.json');
+ return new armnn.Model(metadata, model);
+ }
+};
+
+armnn.Model = class {
+
+ constructor(metadata, model) {
+ this.format = 'Arm NN';
+ this.graphs = [ new armnn.Graph(metadata, model) ];
+ }
+};
+
+armnn.Graph = class {
+
+ constructor(metadata, graph) {
+ this.name = '';
+ this.nodes = [];
+ this.inputs = [];
+ this.outputs = [];
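+ // First pass: count how many input slots consume each output slot ('layerIndex:slotIndex'),
+ // so that constants referenced exactly once can be folded into initializers below.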
+ const counts = new Map();
+ for (const layer of graph.layers) {
+ const base = armnn.Node.getBase(layer);
+ for (const slot of base.inputSlots) {
+ const name = `${slot.connection.sourceLayerIndex}:${slot.connection.outputSlotIndex}`;
+ counts.set(name, counts.has(name) ? counts.get(name) + 1 : 1);
+ }
+ }
+ const values = new Map();
+ const value = (layerIndex, slotIndex, tensor) => {
+ const name = `${layerIndex}:${slotIndex}`;
+ if (!values.has(name)) {
+ const layer = graph.layers[layerIndex];
+ const base = layerIndex < graph.layers.length ? armnn.Node.getBase(layer) : null;
+ const tensorInfo = base && slotIndex < base.outputSlots.length ? base.outputSlots[slotIndex].tensorInfo : null;
+ values.set(name, new armnn.Value(name, tensorInfo, tensor));
+ }
+ return values.get(name);
+ };
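+ // Drop Constant layers with a single consumer from the node list; their tensors become initializers instead.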
+ const layers = graph.layers.filter((layer) => {
+ const base = armnn.Node.getBase(layer);
+ if (base.layerType == armnn.schema.LayerType.Constant && base.outputSlots.length === 1 && layer.layer.input) {
+ /* eslint-disable prefer-destructuring */
+ const slot = base.outputSlots[0];
+ /* eslint-enable prefer-destructuring */
+ const name = `${base.index}:${slot.index}`;
+ if (counts.get(name) === 1) {
+ const tensor = new armnn.Tensor(layer.layer.input, 'Constant');
+ value(base.index, slot.index, tensor);
+ return false;
+ }
+ }
+ return true;
+ });
+ for (const layer of layers) {
+ const base = armnn.Node.getBase(layer);
+ for (const slot of base.inputSlots) {
+ value(slot.connection.sourceLayerIndex, slot.connection.outputSlotIndex);
+ }
+ }
+ for (const layer of layers) {
+ const base = armnn.Node.getBase(layer);
+ switch (base.layerType) {
+ case armnn.schema.LayerType.Input: {
+ const name = base ? base.layerName : '';
+ for (const slot of base.outputSlots) {
+ const argument = new armnn.Argument(name, [ value(base.index, slot.index) ]);
+ this.inputs.push(argument);
+ }
+ break;
+ }
+ case armnn.schema.LayerType.Output: {
+ const base = armnn.Node.getBase(layer);
+ const name = base ? base.layerName : '';
+ for (const slot of base.inputSlots) {
+ const argument = new armnn.Argument(name, [ value(slot.connection.sourceLayerIndex, slot.connection.outputSlotIndex) ]);
+ this.outputs.push(argument);
+ }
+ break;
+ }
+ default:
+ this.nodes.push(new armnn.Node(metadata, layer, value));
+ break;
+ }
+ }
+ }
+};
+
+armnn.Node = class {
+
+ constructor(metadata, layer, value) {
+ const type = layer.layer.constructor.name;
+ this.type = Object.assign({}, metadata.type(type) || { name: type });
+ this.type.name = this.type.name.replace(/Layer$/, '');
+ this.name = '';
+ this.outputs = [];
+ this.inputs = [];
+ this.attributes = [];
+ const inputSchemas = (this.type && this.type.inputs) ? [...this.type.inputs] : [ { name: 'input' } ];
+ const outputSchemas = (this.type && this.type.outputs) ? [...this.type.outputs] : [ { name: 'output' } ];
+ const base = armnn.Node.getBase(layer);
+ if (base) {
+ this.name = base.layerName;
+ const inputs = [...base.inputSlots];
+ while (inputs.length > 0) {
+ const inputSchema = inputSchemas.length > 0 ? inputSchemas.shift() : { name: '?' };
+ const count = inputSchema.list ? inputs.length : 1;
+ const argument = new armnn.Argument(inputSchema.name, inputs.splice(0, count).map((inputSlot) => {
+ return value(inputSlot.connection.sourceLayerIndex, inputSlot.connection.outputSlotIndex);
+ }));
+ this.inputs.push(argument);
+ }
+ const outputs = [...base.outputSlots];
+ while (outputs.length > 0) {
+ const outputSchema = outputSchemas.length > 0 ? outputSchemas.shift() : { name: '?' };
+ const count = outputSchema.list ? outputs.length : 1;
+ this.outputs.push(new armnn.Argument(outputSchema.name, outputs.splice(0, count).map((outputSlot) => {
+ return value(base.index, outputSlot.index);
+ })));
+ }
+ }
+ if (layer.layer) {
+ if (layer.layer.descriptor && this.type.attributes) {
+ for (const [name, value] of Object.entries(layer.layer.descriptor)) {
+ const attribute = new armnn.Attribute(metadata.attribute(type, name), name, value);
+ this.attributes.push(attribute);
+ }
+ }
+ for (const [name, tensor] of Object.entries(layer.layer).filter(([, value]) => value instanceof armnn.schema.ConstTensor)) {
+ const value = new armnn.Value('', tensor.info, new armnn.Tensor(tensor));
+ const argument = new armnn.Argument(name, [ value ]);
+ this.inputs.push(argument);
+ }
+ }
+ }
+
+ static getBase(layer) {
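+ // Bindable layers (e.g. Input/Output) wrap the common LayerBase in a BindableLayerBase, so it may sit one level deeper.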
+ return layer.layer.base.base ? layer.layer.base.base : layer.layer.base;
+ }
+
+ static makeKey(layer_id, index) {
+ return `${layer_id}_${index}`;
+ }
+};
+
+armnn.Attribute = class {
+
+ constructor(metadata, name, value) {
+ this.name = name;
+ this.type = metadata ? metadata.type : null;
+ this.value = ArrayBuffer.isView(value) ? Array.from(value) : value;
+ if (armnn.schema[this.type]) {
+ this.value = armnn.Utility.enum(this.type, this.value);
+ }
+ }
+};
+
+armnn.Argument = class {
+
+ constructor(name, value) {
+ this.name = name;
+ this.value = value;
+ }
+};
+
+armnn.Value = class {
+
+ constructor(name, tensorInfo, initializer) {
+ if (typeof name !== 'string') {
+ throw new armnn.Error(`Invalid value identifier '${JSON.stringify(name)}'.`);
+ }
+ this.name = name;
+ this.type = new armnn.TensorType(tensorInfo);
+ this.initializer = initializer;
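+ // Only expose quantization metadata when at least one parameter differs from its default.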
+ if (tensorInfo.quantizationScale !== 0 ||
+ tensorInfo.quantizationOffset !== 0 ||
+ tensorInfo.quantizationScales.length > 0 ||
+ tensorInfo.quantizationDim !== 0) {
+ this.quantization = {
+ type: 'linear',
+ dimension: tensorInfo.quantizationDim,
+ scale: [ tensorInfo.quantizationScale ],
+ offset: [ tensorInfo.quantizationOffset ]
+ };
+ }
+ }
+};
+
+armnn.Tensor = class {
+
+ constructor(tensor, category) {
+ this.type = new armnn.TensorType(tensor.info);
+ this.category = category || '';
+ const data = tensor.data.data.slice(0);
+ this.values = new Uint8Array(data.buffer, data.byteOffset, data.byteLength);
+ }
+};
+
+armnn.TensorType = class {
+
+ constructor(tensorInfo) {
+ const dataType = tensorInfo.dataType;
+ switch (dataType) {
+ case 0: this.dataType = 'float16'; break;
+ case 1: this.dataType = 'float32'; break;
+ case 2: this.dataType = 'quint8'; break; // QuantisedAsymm8
+ case 3: this.dataType = 'int32'; break;
+ case 4: this.dataType = 'boolean'; break;
+ case 5: this.dataType = 'qint16'; break; // QuantisedSymm16
+ case 6: this.dataType = 'quint8'; break; // QAsymmU8
+ case 7: this.dataType = 'qint16'; break; // QSymmS16
+ case 8: this.dataType = 'qint8'; break; // QAsymmS8
+ case 9: this.dataType = 'qint8'; break; // QSymmS8
+ default:
+ throw new armnn.Error(`Unsupported data type '${JSON.stringify(dataType)}'.`);
+ }
+ this.shape = new armnn.TensorShape(tensorInfo.dimensions);
+ }
+
+ toString() {
+ return this.dataType + this.shape.toString();
+ }
+};
+
+armnn.TensorShape = class {
+
+ constructor(dimensions) {
+ this.dimensions = Array.from(dimensions);
+ }
+
+ toString() {
+ if (!this.dimensions || this.dimensions.length == 0) {
+ return '';
+ }
+ return `[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`;
+ }
+};
+
+armnn.Utility = class {
+
+ static enum(name, value) {
+ const type = name && armnn.schema ? armnn.schema[name] : undefined;
+ if (type) {
+ armnn.Utility._enums = armnn.Utility._enums || new Map();
+ if (!armnn.Utility._enums.has(name)) {
+ const entries = new Map(Object.entries(type).map(([key, value]) => [ value, key ]));
+ armnn.Utility._enums.set(name, entries);
+ }
+ const entries = armnn.Utility._enums.get(name);
+ if (entries.has(value)) {
+ return entries.get(value);
+ }
+ }
+ return value;
+ }
+};
+
+armnn.Error = class extends Error {
+
+ constructor(message) {
+ super(message);
+ this.name = 'Error loading Arm NN model.';
+ }
+};
+
+export const ModelFactory = armnn.ModelFactory;
diff --git a/barracuda.js b/barracuda.js
new file mode 100755
index 00000000000..7cebabcd888
--- /dev/null
+++ b/barracuda.js
@@ -0,0 +1,425 @@
+
+// Experimental
+
+import * as base from './base.js';
+
+const barracuda = {};
+
+barracuda.ModelFactory = class {
+
+ match(context) {
+ const stream = context.stream;
+ if (stream && stream.length > 12) {
+ const buffer = stream.peek(12);
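+ // Heuristic: a Barracuda file begins with a small little-endian version word (first byte <= 0x20, next seven bytes zero).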
+ if (buffer[0] <= 0x20 && buffer.subarray(1, 8).every((value) => value == 0x00)) {
+ return 'barracuda';
+ }
+ }
+ return null;
+ }
+
+ async open(context) {
+ const metadata = barracuda.Metadata.open();
+ const model = new barracuda.NNModel(context.stream.peek());
+ return new barracuda.Model(metadata, model);
+ }
+};
+
+barracuda.Model = class {
+
+ constructor(metadata, model) {
+ const version = model.version.toString();
+ this.format = `Barracuda v${version}`;
+ this.graphs = [ new barracuda.Graph(metadata, model) ];
+ }
+};
+
+barracuda.Graph = class {
+
+ constructor(metadata, model) {
+ this.name = '';
+ this.inputs = [];
+ this.outputs = [];
+ this.nodes = [];
+ const values = new Map();
+ values.map = (name, type, tensor) => {
+ if (!values.has(name)) {
+ type = tensor ? tensor.type : type;
+ values.set(name, new barracuda.Value(name, type, tensor));
+ } else if (type || tensor) {
+ throw new barracuda.Error(`Duplicate value '${name}'.`);
+ }
+ return values.get(name);
+ };
+ const layers = [];
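+ // Layer type 255 ('Load') without inputs only carries constant tensors; register those as initializers rather than nodes.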
+ for (const layer of model.layers) {
+ if (layer.type !== 255 || layer.inputs.length > 0) {
+ layers.push(layer);
+ } else {
+ for (const tensor of layer.tensors) {
+ values.map(tensor.name, null, new barracuda.Tensor(tensor));
+ }
+ }
+ }
+ for (const input of model.inputs) {
+ const shape = new barracuda.TensorShape(input.shape);
+ const type = new barracuda.TensorType(4, shape);
+ const argument = new barracuda.Argument(input.name, [ values.map(input.name, type) ]);
+ this.inputs.push(argument);
+ }
+ for (const output of model.outputs) {
+ const argument = new barracuda.Argument(output, [ values.map(output) ]);
+ this.outputs.push(argument);
+ }
+ for (const layer of layers) {
+ const node = new barracuda.Node(metadata, layer, null, values);
+ this.nodes.push(node);
+ }
+ }
+};
+
+barracuda.Argument = class {
+
+ constructor(name, value) {
+ this.name = name;
+ this.value = value;
+ }
+};
+
+barracuda.Value = class {
+
+ constructor(name, type, initializer) {
+ this.name = name;
+ this.type = type || null;
+ this.initializer = initializer || null;
+ }
+};
+
+barracuda.Node = class {
+
+ constructor(metadata, layer, type, values) {
+ this.name = layer.name || '';
+ this.type = type ? type : metadata.type(layer.type);
+ this.inputs = [];
+ this.outputs = [];
+ this.attributes = [];
+ const inputs = Array.prototype.slice.call(this.type.inputs || [ { name: 'input' } ]);
+ if (this.type.inputs && this.type.inputs.length === 1 && this.type.inputs[0].name === 'inputs') {
+ const argument = new barracuda.Argument('inputs', layer.inputs.map((input) => values.map(input)));
+ this.inputs.push(argument);
+ } else if (layer.inputs) {
+ for (let i = 0; i < layer.inputs.length; i++) {
+ const input = layer.inputs[i];
+ const name = inputs.length > 0 ? inputs.shift().name : i.toString();
+ const argument = new barracuda.Argument(name, [ values.map(input) ]);
+ this.inputs.push(argument);
+ }
+ }
+ if (layer.tensors) {
+ for (let i = 0; i < layer.tensors.length; i++) {
+ const tensor = layer.tensors[i];
+ const initializer = new barracuda.Tensor(tensor);
+ const name = inputs.length > 0 ? inputs.shift().name : i.toString();
+ const argument = new barracuda.Argument(name, [ values.map(tensor.name, initializer.type, initializer) ]);
+ this.inputs.push(argument);
+ }
+ }
+ if (layer.inputs !== undefined) {
+ const argument = new barracuda.Argument('output', [ values.map(this.name) ]);
+ this.outputs.push(argument);
+ }
+ if (layer.activation !== undefined && (layer.type === 50 || layer.activation !== 0)) {
+ const type = barracuda.Activation[layer.activation];
+ if (!type) {
+ throw new barracuda.Error(`Unsupported activation '${layer.activation}'.`);
+ }
+ const node = new barracuda.Node(metadata, {}, { name: type, category: 'Activation' }, values);
+ this.chain = [ node ];
+ }
+ const attribute = (name, type, value, defaultValue) => {
+ if (value === undefined) {
+ return;
+ }
+ if (Array.isArray(defaultValue) && Array.isArray(value) && value.length == defaultValue.length && value.every((v, i) => v === defaultValue[i])) {
+ return;
+ }
+ if (typeof defaultValue == 'function' && defaultValue(value)) {
+ return;
+ }
+ if (defaultValue === value) {
+ return;
+ }
+ const attribute = new barracuda.Attribute(name, type, value);
+ this.attributes.push(attribute);
+ };
+ attribute('strides', 'int32[]', layer.strides, []);
+ attribute('pads', 'int32[]', layer.pads, (value) => Array.isArray(value) && (value.every((v) => v === 0) || value.every((v) => v === -1)));
+ attribute('size', 'int32[]', layer.pool_size, []);
+ attribute('alpha', 'float32', layer.alpha, 1);
+ attribute('beta', 'float32', layer.beta, 0);
+ attribute('axis', 'int32', layer.axis, -1);
+ }
+};
+
+barracuda.Attribute = class {
+
+ constructor(name, type, value) {
+ this.name = name;
+ this.type = type;
+ this.value = value;
+ }
+};
+
+barracuda.Tensor = class {
+
+ constructor(tensor) {
+ this.type = new barracuda.TensorType(tensor.itemsize, new barracuda.TensorShape(tensor.shape));
+ this.values = tensor.data;
+ }
+};
+
+barracuda.TensorType = class {
+
+ constructor(itemsize, shape) {
+ switch (itemsize) {
+ case 4: this.dataType = 'float32'; break;
+ default: throw new barracuda.Error(`Unsupported data type size '${itemsize}'.`);
+ }
+ this.shape = shape;
+ }
+
+ toString() {
+ return this.dataType + this.shape.toString();
+ }
+};
+
+barracuda.TensorShape = class {
+
+ constructor(dimensions) {
+ this.dimensions = dimensions;
+ }
+
+ toString() {
+ return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension ? dimension.toString() : '?').join(',')}]`) : '';
+ }
+};
+
+barracuda.NNModel = class {
+
+ constructor(buffer) {
+ // https://github.com/Unity-Technologies/barracuda-release/blob/release/1.3.2/Barracuda/Runtime/Core/Model.cs
+ const reader = new barracuda.BinaryReader(buffer);
+ this.version = reader.int32();
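+ // The next header word is not interpreted here; it is read only to keep the stream aligned.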
+ reader.int32();
+ this.inputs = new Array(reader.int32());
+ for (let i = 0; i < this.inputs.length; i++) {
+ this.inputs[i] = {
+ name: reader.string(),
+ shape: reader.shape()
+ };
+ }
+ this.outputs = reader.strings();
+ this.memories = new Array(reader.int32());
+ for (let i = 0; i < this.memories.length; i++) {
+ this.memories[i] = {
+ shape: reader.shape(),
+ in: reader.string(),
+ out: reader.string()
+ };
+ }
+ this.layers = new Array(reader.int32());
+ for (let i = 0; i < this.layers.length; i++) {
+ const layer = {};
+ layer.name = reader.string();
+ layer.type = reader.int32();
+ layer.activation = reader.int32();
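+ // Two unidentified per-layer words follow; they are read and discarded.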
+ reader.int32();
+ reader.int32();
+ layer.pads = reader.int32s();
+ layer.strides = reader.int32s();
+ layer.pool_size = reader.int32s();
+ layer.axis = reader.int32();
+ layer.alpha = reader.float32();
+ layer.beta = reader.float32();
+ reader.int32();
+ layer.inputs = reader.strings();
+ layer.tensors = [];
+ const tensorsLength = reader.int32();
+ for (let j = 0; j < tensorsLength; j++) {
+ layer.tensors.push({
+ name: reader.string(),
+ shape: reader.shape(),
+ offset: reader.int64(),
+ itemsize: reader.int32(),
+ length: reader.int32()
+ });
+ }
+ this.layers[i] = layer;
+ }
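+ // Tensor payloads follow the layer table; offsets and lengths are stored in elements, hence the itemsize scaling.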
+ const position = reader.position;
+ for (const layer of this.layers) {
+ for (const tensor of layer.tensors) {
+ reader.seek(position + (tensor.offset * tensor.itemsize));
+ tensor.data = reader.read(tensor.length * tensor.itemsize);
+ }
+ }
+ }
+};
+
+barracuda.Activation = {
+ 0: "Linear", 1: "Relu", 2: "Softmax", 3: "Tanh", 4: "Sigmoid", 5: "Elu", 6: "Relu6", 7: "LeakyRelu", 8: "Selu", 9: "Swish",
+ 10: "LogSoftmax", 11: "Softplus", 12: "Softsign", 13: "PRelu",
+ 20: "Hardmax", 21: "HardSigmoid",
+ 100: "Abs", 101: "Neg", 102: "Ceil", 103: "Clip", 104: "Floor", 105: "Round",
+ 110: "Reciprocal", 111: "Sqrt", 113: "Exp", 114: "Log",
+ 200: "Acos", 201: "Acosh", 202: "Asin", 203: "Asinh", 204: "Atan", 205: "Atanh", 206: "Cos", 207: "Cosh", 208: "Sin", 209: "Sinh", 210: "Tan"
+};
+
+barracuda.BinaryReader = class extends base.BinaryReader {
+
+ int32s() {
+ const values = new Array(this.int32());
+ for (let i = 0; i < values.length; i++) {
+ values[i] = this.int32();
+ }
+ return values;
+ }
+
+ string() {
+ let content = '';
+ const size = this.int32();
+ let position = this._position;
+ this.skip(size);
+ for (let i = 0; i < size; i++) {
+ content += String.fromCharCode(this._buffer[position++]);
+ }
+ return content;
+ }
+
+ strings() {
+ const values = [];
+ const length = this.int32();
+ for (let i = 0; i < length; i++) {
+ values.push(this.string());
+ }
+ return values;
+ }
+
+ shape() {
+ return this.int32s();
+ }
+};
+
+barracuda.Metadata = class {
+
+ static open() {
+ barracuda.Metadata._metadata = barracuda.Metadata._metadata || new barracuda.Metadata();
+ return barracuda.Metadata._metadata;
+ }
+
+ constructor() {
+ this._types = new Map();
+ const register = (id, name, category, inputs) => {
+ this._types.set(id, { name: name, category: category, inputs: (inputs || []).map((input) => {
+ return { name: input };
+ }) });
+ };
+ register(0, 'Nop', '');
+ register(1, 'Dense', 'Layer', [ 'input', 'kernel', 'bias' ]);
+ register(2, 'MatMul', '', [ 'input', 'kernel', 'bias' ]);
+ register(20, 'Conv2D', 'Layer', [ 'input', 'kernel', 'bias' ]);
+ register(21, 'DepthwiseConv2D', 'Layer', [ 'input', 'kernel', 'bias' ]);
+ register(22, 'Conv2DTrans', 'Layer', [ 'input', 'kernel', 'bias' ]);
+ register(23, 'Upsample2D', 'Data');
+ register(25, 'MaxPool2D', 'Pool');
+ register(26, 'AvgPool2D', 'Pool');
+ register(27, 'GlobalMaxPool2D', 'Pool');
+ register(28, 'GlobalAvgPool2D', 'Pool');
+ register(29, 'Border2D', '');
+ register(30, 'Conv3D', 'Layer');
+ register(32, 'Conv3DTrans', 'Layer');
+ register(33, 'Upsample3D', 'Data');
+ register(35, 'MaxPool3D', 'Pool');
+ register(36, 'AvgPool3D', 'Pool');
+ register(37, 'GlobalMaxPool3D', 'Pool');
+ register(38, 'GlobalAvgPool3D', 'Pool');
+ register(39, 'Border3D', '');
+ register(50, 'Activation', '', [ 'input' ]);
+ register(51, 'ScaleBias', 'Normalization', [ 'input', 'scale', 'bias' ]);
+ register(52, 'Normalization', 'Normalization');
+ register(53, 'LRN', 'Normalization');
+ register(60, 'Dropout', 'Dropout');
+ register(64, 'RandomNormal', '');
+ register(65, 'RandomUniform', '');
+ register(66, 'Multinomial', '');
+ register(67, 'OneHot', '');
+ register(68, 'TopKIndices', '');
+ register(69, 'TopKValues', '');
+ register(100, 'Add', '', [ 'inputs' ]);
+ register(101, 'Sub', '', [ 'inputs' ]);
+ register(102, 'Mul', '', [ 'inputs' ]);
+ register(103, 'RealDiv', '', [ 'inputs' ]);
+ register(104, 'Pow', '', [ 'inputs' ]);
+ register(110, 'Minimum', '', [ 'inputs' ]);
+ register(111, 'Maximum', '', [ 'inputs' ]);
+ register(112, 'Mean', '', [ 'inputs' ]);
+ register(120, 'ReduceL1', '', [ 'inputs' ]);
+ register(121, 'ReduceL2', '', [ 'inputs' ]);
+ register(122, 'ReduceLogSum', '', [ 'inputs' ]);
+ register(123, 'ReduceLogSumExp', '', [ 'inputs' ]);
+ register(124, 'ReduceMax', '', [ 'inputs' ]);
+ register(125, 'ReduceMean', '', [ 'inputs' ]);
+ register(126, 'ReduceMin', '', [ 'inputs' ]);
+ register(127, 'ReduceProd', '', [ 'inputs' ]);
+ register(128, 'ReduceSum', '', [ 'inputs' ]);
+ register(129, 'ReduceSumSquare', '', [ 'inputs' ]);
+ register(140, 'Greater', '');
+ register(141, 'GreaterEqual', '');
+ register(142, 'Less', '');
+ register(143, 'LessEqual', '');
+ register(144, 'Equal', '');
+ register(145, 'LogicalOr', '');
+ register(146, 'LogicalAnd', '');
+ register(147, 'LogicalNot', '');
+ register(148, 'LogicalXor', '');
+ register(160, 'Pad2DReflect', '');
+ register(161, 'Pad2DSymmetric', '');
+ register(162, 'Pad2DEdge', '');
+ register(200, 'Flatten', 'Shape');
+ register(201, 'Reshape', 'Shape');
+ register(202, 'Transpose', '');
+ register(203, 'Squeeze', '');
+ register(204, 'Unsqueeze', '');
+ register(205, 'Gather', '');
+ register(206, 'DepthToSpace', '');
+ register(207, 'SpaceToDepth', '');
+ register(208, 'Expand', '');
+ register(209, 'Resample2D', '');
+ register(210, 'Concat', 'Tensor', [ 'inputs' ]);
+ register(211, 'StridedSlice', 'Shape');
+ register(212, 'Tile', '');
+ register(213, 'Shape', '');
+ register(214, 'NonMaxSuppression', '');
+ register(215, 'LSTM', '');
+ register(255, 'Load', '');
+ }
+
+ type(name) {
+ if (!this._types.has(name)) {
+ this._types.set(name, { name: name.toString() });
+ }
+ return this._types.get(name);
+ }
+};
+
+barracuda.Error = class extends Error {
+
+ constructor(message) {
+ super(message);
+ this.name = 'Error loading Barracuda model.';
+ }
+};
+
+export const ModelFactory = barracuda.ModelFactory;
+
diff --git a/base.js b/base.js
new file mode 100644
index 00000000000..3e845adf5a8
--- /dev/null
+++ b/base.js
@@ -0,0 +1,1188 @@
+
+const base = {};
+
+base.Int64 = class Int64 {
+
+ constructor(low, high) {
+ this.low = low | 0;
+ this.high = high | 0;
+ }
+
+ static create(value) {
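+ // Split a JS number into two 32-bit halves, saturating at the int64 range and mapping NaN to zero.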
+ if (isNaN(value)) {
+ return base.Int64.zero;
+ }
+ if (value <= -9223372036854776000) {
+ return base.Int64.min;
+ }
+ if (value + 1 >= 9223372036854776000) {
+ return base.Int64.max;
+ }
+ if (value < 0) {
+ return base.Int64.create(-value).negate();
+ }
+ return new base.Int64((value % 4294967296) | 0, (value / 4294967296));
+ }
+
+ get isZero() {
+ return this.low === 0 && this.high === 0;
+ }
+
+ get isNegative() {
+ return this.high < 0;
+ }
+
+ negate() {
+ if (this.equals(base.Int64.min)) {
+ return base.Int64.min;
+ }
+ return this.not().add(base.Int64.one);
+ }
+
+ not() {
+ return new base.Int64(~this.low, ~this.high);
+ }
+
+ equals(other) {
+ if (!(other instanceof base.Int64) && (this.high >>> 31) === 1 && (other.high >>> 31) === 1) {
+ return false;
+ }
+ return this.high === other.high && this.low === other.low;
+ }
+
+ compare(other) {
+ if (this.equals(other)) {
+ return 0;
+ }
+ const thisNeg = this.isNegative;
+ const otherNeg = other.isNegative;
+ if (thisNeg && !otherNeg) {
+ return -1;
+ }
+ if (!thisNeg && otherNeg) {
+ return 1;
+ }
+ return this.subtract(other).isNegative ? -1 : 1;
+ }
+
+ add(other) {
+ return base.Utility.add(this, other, false);
+ }
+
+ subtract(other) {
+ return base.Utility.subtract(this, other, false);
+ }
+
+ multiply(other) {
+ return base.Utility.multiply(this, other, false);
+ }
+
+ divide(other) {
+ return base.Utility.divide(this, other, false);
+ }
+
+ toInteger() {
+ return this.low;
+ }
+
+ toNumber() {
+ if (this.high === 0) {
+ return this.low >>> 0;
+ }
+ if (this.high === -1) {
+ return this.low;
+ }
+ return (this.high * 4294967296) + (this.low >>> 0);
+ }
+
+ toString(radix) {
+ const r = radix || 10;
+ if (r < 2 || r > 36) {
+ throw new RangeError('radix');
+ }
+ if (this.isZero) {
+ return '0';
+ }
+ if (this.high < 0) {
+ if (this.equals(base.Int64.min)) {
+ const radix = new base.Int64(r, 0);
+ const div = this.divide(radix);
+ const remainder = div.multiply(radix).subtract(this);
+ return div.toString(r) + (remainder.low >>> 0).toString(r);
+ }
+ return `-${this.negate().toString(r)}`;
+ }
+ if (this.high === 0) {
+ return this.low.toString(r);
+ }
+ return base.Utility.text(this, false, r);
+ }
+};
+
+base.Int64.min = new base.Int64(0, -2147483648);
+base.Int64.zero = new base.Int64(0, 0);
+base.Int64.one = new base.Int64(1, 0);
+base.Int64.negativeOne = new base.Int64(-1, 0);
+base.Int64.power24 = new base.Int64(1 << 24, 0);
+base.Int64.max = new base.Int64(0, 2147483647);
+
+base.Uint64 = class Uint64 {
+
+ constructor(low, high) {
+ this.low = low | 0;
+ this.high = high | 0;
+ }
+
+ static create(value) {
+ if (isNaN(value)) {
+ return base.Uint64.zero;
+ }
+ if (value < 0) {
+ return base.Uint64.zero;
+ }
+ if (value >= 18446744073709552000) {
+ return base.Uint64.max;
+ }
+ return new base.Uint64((value % 4294967296) | 0, (value / 4294967296));
+ }
+
+ get isZero() {
+ return this.low === 0 && this.high === 0;
+ }
+
+ get isNegative() {
+ return false;
+ }
+
+ negate() {
+ return this.not().add(base.Int64.one);
+ }
+
+ not() {
+ return new base.Uint64(~this.low, ~this.high);
+ }
+
+ equals(other) {
+ if (!(other instanceof base.Uint64) && (this.high >>> 31) === 1 && (other.high >>> 31) === 1) {
+ return false;
+ }
+ return this.high === other.high && this.low === other.low;
+ }
+
+ compare(other) {
+ if (this.equals(other)) {
+ return 0;
+ }
+ const thisNeg = this.isNegative;
+ const otherNeg = other.isNegative;
+ if (thisNeg && !otherNeg) {
+ return -1;
+ }
+ if (!thisNeg && otherNeg) {
+ return 1;
+ }
+ return (other.high >>> 0) > (this.high >>> 0) || (other.high === this.high && (other.low >>> 0) > (this.low >>> 0)) ? -1 : 1;
+ }
+
+ add(other) {
+ return base.Utility.add(this, other, true);
+ }
+
+ subtract(other) {
+ return base.Utility.subtract(this, other, true);
+ }
+
+ multiply(other) {
+ return base.Utility.multiply(this, other, true);
+ }
+
+ divide(other) {
+ return base.Utility.divide(this, other, true);
+ }
+
+ toInteger() {
+ return this.low >>> 0;
+ }
+
+ toNumber() {
+ if (this.high === 0) {
+ return this.low >>> 0;
+ }
+ return ((this.high >>> 0) * 4294967296) + (this.low >>> 0);
+ }
+
+ toString(radix) {
+ const r = radix || 10;
+ if (r < 2 || r > 36) {
+ throw new RangeError('radix');
+ }
+ if (this.isZero) {
+ return '0';
+ }
+ if (this.high === 0) {
+ return this.low.toString(r);
+ }
+ return base.Utility.text(this, true, r);
+ }
+};
+
+base.Utility = class {
+
+ static add(a, b, unsigned) {
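+ // Add in four 16-bit limbs with explicit carries so every intermediate stays within 32-bit integer range.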
+ const a48 = a.high >>> 16;
+ const a32 = a.high & 0xFFFF;
+ const a16 = a.low >>> 16;
+ const a00 = a.low & 0xFFFF;
+ const b48 = b.high >>> 16;
+ const b32 = b.high & 0xFFFF;
+ const b16 = b.low >>> 16;
+ const b00 = b.low & 0xFFFF;
+ let c48 = 0;
+ let c32 = 0;
+ let c16 = 0;
+ let c00 = 0;
+ c00 += a00 + b00;
+ c16 += c00 >>> 16;
+ c00 &= 0xFFFF;
+ c16 += a16 + b16;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c32 += a32 + b32;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c48 += a48 + b48;
+ c48 &= 0xFFFF;
+ return base.Utility._create((c16 << 16) | c00, (c48 << 16) | c32, unsigned);
+ }
+
+ static subtract(a, b, unsigned) {
+ return base.Utility.add(a, b.negate(), unsigned);
+ }
+
+ static multiply(a, b, unsigned) {
+ if (a.isZero) {
+ return base.Int64.zero;
+ }
+ if (b.isZero) {
+ return base.Int64.zero;
+ }
+ if (a.equals(base.Int64.min)) {
+ return (b.low & 1) === 1 ? base.Int64.min : base.Int64.zero; // no isOdd() helper here; test the low bit directly
+ }
+ if (b.equals(base.Int64.min)) {
+ return (a.low & 1) === 1 ? base.Int64.min : base.Int64.zero;
+ }
+ if (a.isNegative) {
+ if (b.isNegative) {
+ return a.negate().multiply(b.negate());
+ }
+ return a.negate().multiply(b).negate();
+ } else if (b.isNegative) {
+ return a.multiply(b.negate()).negate();
+ }
+ if (a.compare(base.Int64.power24) < 0 && b.compare(base.Int64.power24) < 0) {
+ return unsigned ? base.Uint64.create(a.toNumber() * b.toNumber()) : base.Int64.create(a.toNumber() * b.toNumber());
+ }
+ const a48 = a.high >>> 16;
+ const a32 = a.high & 0xFFFF;
+ const a16 = a.low >>> 16;
+ const a00 = a.low & 0xFFFF;
+ const b48 = b.high >>> 16;
+ const b32 = b.high & 0xFFFF;
+ const b16 = b.low >>> 16;
+ const b00 = b.low & 0xFFFF;
+ let c48 = 0;
+ let c32 = 0;
+ let c16 = 0;
+ let c00 = 0;
+ c00 += a00 * b00;
+ c16 += c00 >>> 16;
+ c00 &= 0xFFFF;
+ c16 += a16 * b00;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c16 += a00 * b16;
+ c32 += c16 >>> 16;
+ c16 &= 0xFFFF;
+ c32 += a32 * b00;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c32 += a16 * b16;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c32 += a00 * b32;
+ c48 += c32 >>> 16;
+ c32 &= 0xFFFF;
+ c48 += a48 * b00 + a32 * b16 + a16 * b32 + a00 * b48;
+ c48 &= 0xFFFF;
+ return base.Utility._create((c16 << 16) | c00, (c48 << 16) | c32, unsigned);
+ }
+
+ static divide(a, b, unsigned) {
+ if (b.isZero) {
+ throw new Error('Division by zero.');
+ }
+ if (a.isZero) {
+ return unsigned ? base.Uint64.zero : base.Int64.zero;
+ }
+ let approx;
+ let remainder;
+ let result;
+ if (!unsigned) {
+ if (a.equals(base.Int64.min)) {
+ if (b.equals(base.Int64.one) || b.equals(base.Int64.negativeOne)) {
+ return base.Int64.min;
+ } else if (b.equals(base.Int64.min)) {
+ return base.Int64.one;
+ }
+ const half = base.Utility._shiftRight(a, unsigned, 1);
+ const halfDivide = half.divide(b);
+ approx = base.Utility._shiftLeft(halfDivide, halfDivide instanceof base.Uint64, 1);
+ if (approx.equals(base.Int64.zero)) {
+ return b.isNegative ? base.Int64.one : base.Int64.negativeOne;
+ }
+ remainder = a.subtract(b.multiply(approx));
+ result = approx.add(remainder.divide(b));
+ return result;
+ } else if (b.equals(base.Int64.min)) {
+ return base.Int64.zero;
+ }
+ if (a.isNegative) {
+ if (b.isNegative) {
+ return a.negate().divide(b.negate());
+ }
+ return a.negate().divide(b).negate();
+ } else if (b.isNegative) {
+ return a.divide(b.negate()).negate();
+ }
+ result = base.Int64.zero;
+ } else {
+ if (!(b instanceof base.Uint64)) {
+ b = new base.Uint64(b.low, b.high);
+ }
+ if (b.compare(a) > 0) {
+ return base.Uint64.zero;
+ }
+ if (b.compare(base.Utility._shiftRight(a, unsigned, 1)) > 0) {
+ return base.Uint64.one;
+ }
+ result = base.Uint64.zero;
+ }
+ remainder = a;
+ while (remainder.compare(b) >= 0) {
+ let approx = Math.max(1, Math.floor(remainder.toNumber() / b.toNumber()));
+ const log2 = Math.ceil(Math.log(approx) / Math.LN2);
+ const delta = (log2 <= 48) ? 1 : Math.pow(2, log2 - 48);
+ let approxResult = unsigned ? base.Uint64.create(approx) : base.Int64.create(approx);
+ let approxRemainder = approxResult.multiply(b);
+ while (approxRemainder.isNegative || approxRemainder.compare(remainder) > 0) {
+ approx -= delta;
+ approxResult = unsigned ? base.Uint64.create(approx) : base.Int64.create(approx);
+ approxRemainder = approxResult.multiply(b);
+ }
+ if (approxResult.isZero) {
+ approxResult = base.Int64.one;
+ }
+ result = result.add(approxResult);
+ remainder = remainder.subtract(approxRemainder);
+ }
+ return result;
+ }
+
+ static text(value, unsigned, radix) {
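+ // Convert by repeatedly dividing by radix^6, emitting six zero-padded digits per iteration.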
+ const power = unsigned ? base.Uint64.create(Math.pow(radix, 6)) : base.Int64.create(Math.pow(radix, 6));
+ let remainder = value;
+ let result = '';
+ for (;;) {
+ const remainderDiv = remainder.divide(power);
+ const intval = remainder.subtract(remainderDiv.multiply(power)).toInteger() >>> 0;
+ let digits = intval.toString(radix);
+ remainder = remainderDiv;
+ if (remainder.low === 0 && remainder.high === 0) {
+ return digits + result;
+ }
+ while (digits.length < 6) {
+ digits = `0${digits}`;
+ }
+ result = `${digits}${result}`;
+ }
+ }
+
+ static _shiftLeft(value, unsigned, shift) {
+ return base.Utility._create(value.low << shift, (value.high << shift) | (value.low >>> (32 - shift)), unsigned);
+ }
+
+ static _shiftRight(value, unsigned, shift) {
+ return base.Utility._create((value.low >>> shift) | (value.high << (32 - shift)), value.high >> shift, unsigned);
+ }
+
+ static _create(low, high, unsigned) {
+ return unsigned ? new base.Uint64(low, high) : new base.Int64(low, high);
+ }
+};
+
+base.Uint64.zero = new base.Uint64(0, 0);
+base.Uint64.one = new base.Uint64(1, 0);
+base.Uint64.max = new base.Uint64(-1, -1);
+
+base.Complex64 = class Complex {
+
+ constructor(real, imaginary) {
+ this.real = real;
+ this.imaginary = imaginary;
+ }
+
+ static create(real, imaginary) {
+ return new base.Complex64(real, imaginary);
+ }
+
+ toString(/* radix */) {
+ return `${this.real} + ${this.imaginary}i`;
+ }
+};
+
+base.Complex128 = class Complex {
+
+ constructor(real, imaginary) {
+ this.real = real;
+ this.imaginary = imaginary;
+ }
+
+ static create(real, imaginary) {
+ return new base.Complex128(real, imaginary);
+ }
+
+ toString(/* radix */) {
+ return `${this.real} + ${this.imaginary}i`;
+ }
+};
+
+if (!DataView.prototype.getFloat16) {
+ DataView.prototype.getFloat16 = function(byteOffset, littleEndian) {
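+ // IEEE 754 half precision: 1 sign, 5 exponent, 10 fraction bits; exponent 0 is subnormal, 0x1F is Inf/NaN.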
+ const value = this.getUint16(byteOffset, littleEndian);
+ const e = (value & 0x7C00) >> 10;
+ let f = value & 0x03FF;
+ if (e == 0) {
+ f = 0.00006103515625 * (f / 1024);
+ } else if (e == 0x1F) {
+ f = f ? NaN : Infinity;
+ } else {
+ f = DataView.__float16_pow[e] * (1 + (f / 1024));
+ }
+ return value & 0x8000 ? -f : f;
+ };
+ DataView.__float16_pow = {
+ 1: 1/16384, 2: 1/8192, 3: 1/4096, 4: 1/2048, 5: 1/1024, 6: 1/512, 7: 1/256, 8: 1/128,
+ 9: 1/64, 10: 1/32, 11: 1/16, 12: 1/8, 13: 1/4, 14: 1/2, 15: 1, 16: 2,
+ 17: 4, 18: 8, 19: 16, 20: 32, 21: 64, 22: 128, 23: 256, 24: 512,
+ 25: 1024, 26: 2048, 27: 4096, 28: 8192, 29: 16384, 30: 32768, 31: 65536
+ };
+}
+
+if (!DataView.prototype.setFloat16) {
+ DataView.prototype.setFloat16 = function(byteOffset, value, littleEndian) {
+ DataView.__float16_float[0] = value;
+ [value] = DataView.__float16_int;
+ const s = (value >>> 16) & 0x8000;
+ const e = (value >>> 23) & 0xff;
+ const f = value & 0x7fffff;
+ const v = s | DataView.__float16_base[e] | (f >> DataView.__float16_shift[e]);
+ this.setUint16(byteOffset, v, littleEndian);
+ };
+ DataView.__float16_float = new Float32Array(1);
+ DataView.__float16_int = new Uint32Array(DataView.__float16_float.buffer, 0, DataView.__float16_float.length);
+ DataView.__float16_base = new Uint32Array(256);
+ DataView.__float16_shift = new Uint32Array(256);
+ for (let i = 0; i < 256; ++i) {
+ const e = i - 127;
+ if (e < -27) {
+ DataView.__float16_base[i] = 0x0000;
+ DataView.__float16_shift[i] = 24;
+ } else if (e < -14) {
+ DataView.__float16_base[i] = 0x0400 >> -e - 14;
+ DataView.__float16_shift[i] = -e - 1;
+ } else if (e <= 15) {
+ DataView.__float16_base[i] = e + 15 << 10;
+ DataView.__float16_shift[i] = 13;
+ } else if (e < 128) {
+ DataView.__float16_base[i] = 0x7c00;
+ DataView.__float16_shift[i] = 24;
+ } else {
+ DataView.__float16_base[i] = 0x7c00;
+ DataView.__float16_shift[i] = 13;
+ }
+ }
+}
+
+if (!DataView.prototype.getBfloat16) {
+ DataView.prototype.getBfloat16 = function(byteOffset, littleEndian) {
+ if (littleEndian) {
+ DataView.__bfloat16_get_uint16_le[1] = this.getUint16(byteOffset, littleEndian);
+ return DataView.__bfloat16_get_float32_le[0];
+ }
+ DataView.__bfloat16_get_uint16_be[0] = this.getUint16(byteOffset, littleEndian);
+ return DataView.__bfloat16_get_float32_be[0];
+ };
+ DataView.__bfloat16_get_float32_le = new Float32Array(1);
+ DataView.__bfloat16_get_float32_be = new Float32Array(1);
+ DataView.__bfloat16_get_uint16_le = new Uint16Array(DataView.__bfloat16_get_float32_le.buffer, DataView.__bfloat16_get_float32_le.byteOffset, 2);
+ DataView.__bfloat16_get_uint16_be = new Uint16Array(DataView.__bfloat16_get_float32_be.buffer, DataView.__bfloat16_get_float32_be.byteOffset, 2);
+}
+
+DataView.__float8e4m3_float32 = new Float32Array(1);
+DataView.__float8e4m3_uint32 = new Uint32Array(DataView.__float8e4m3_float32.buffer, DataView.__float8e4m3_float32.byteOffset, 1);
+DataView.prototype.getFloat8e4m3 = function(byteOffset, fn, uz) {
+ const value = this.getUint8(byteOffset);
+ let exponent_bias = 7;
+ if (uz) {
+ exponent_bias = 8;
+ if (value == 0x80) {
+ return NaN;
+ }
+ } else if (value === 255) {
+ return -NaN;
+ } else if (value === 0x7f) {
+ return NaN;
+ }
+ let expo = (value & 0x78) >> 3;
+ let mant = value & 0x07;
+ const sign = value & 0x80;
+ let res = sign << 24;
+ if (expo == 0) {
+ if (mant > 0) {
+ expo = 0x7F - exponent_bias;
+ if ((mant & 0x4) === 0) {
+ mant &= 0x3;
+ mant <<= 1;
+ expo -= 1;
+ }
+ if ((mant & 0x4) === 0) {
+ mant &= 0x3;
+ mant <<= 1;
+ expo -= 1;
+ }
+ res |= (mant & 0x3) << 21;
+ res |= expo << 23;
+ }
+ } else {
+ res |= mant << 20;
+ expo += 0x7F - exponent_bias;
+ res |= expo << 23;
+ }
+ DataView.__float8e4m3_uint32[0] = res;
+ return DataView.__float8e4m3_float32[0];
+};
+
+DataView.__float8e5m2_float32 = new Float32Array(1);
+DataView.__float8e5m2_uint32 = new Uint32Array(DataView.__float8e5m2_float32.buffer, DataView.__float8e5m2_float32.byteOffset, 1);
+DataView.prototype.getFloat8e5m2 = function(byteOffset, fn, uz) {
+ const value = this.getUint8(byteOffset);
+ let exponent_bias = NaN;
+ if (fn && uz) {
+ if (value == 0x80) {
+ return NaN;
+ }
+ exponent_bias = 16;
+ } else if (!fn && !uz) {
+ if (value >= 253 && value <= 255) {
+ return -NaN;
+ }
+ if (value >= 126 && value <= 127) {
+ return NaN;
+ }
+ if (value === 252) {
+ return -Infinity;
+ }
+ if (value === 124) {
+ return Infinity;
+ }
+ exponent_bias = 15;
+ }
+ let expo = (value & 0x7C) >> 2;
+ let mant = value & 0x03;
+ let res = (value & 0x80) << 24;
+ if (expo == 0) {
+ if (mant > 0) {
+ expo = 0x7F - exponent_bias;
+ if ((mant & 0x2) === 0) {
+ mant &= 0x1;
+ mant <<= 1;
+ expo -= 1;
+ }
+ res |= (mant & 0x1) << 22;
+ res |= expo << 23;
+ }
+ } else {
+ res |= mant << 21;
+ expo += 0x7F - exponent_bias;
+ res |= expo << 23;
+ }
+ DataView.__float8e5m2_uint32[0] = res;
+ return DataView.__float8e5m2_float32[0];
+};
+
+DataView.prototype.getInt64 = DataView.prototype.getInt64 || function(byteOffset, littleEndian) {
+ return littleEndian ?
+ new base.Int64(this.getUint32(byteOffset, true), this.getUint32(byteOffset + 4, true)) :
+ new base.Int64(this.getUint32(byteOffset + 4, true), this.getUint32(byteOffset, true));
+};
+
+DataView.prototype.setInt64 = DataView.prototype.setInt64 || function(byteOffset, value, littleEndian) {
+ if (littleEndian) {
+ this.setUint32(byteOffset, value.low, true);
+ this.setUint32(byteOffset + 4, value.high, true);
+ } else {
+ this.setUint32(byteOffset + 4, value.low, false);
+ this.setUint32(byteOffset, value.high, false);
+ }
+};
+
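+ // Read a signed bit-field 'bits' wide at bit offset 'offset * bits', masked and sign-extended.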
+ DataView.prototype.getIntBits = DataView.prototype.getIntBits || function(offset, bits, littleEndian) {
+ offset = offset * bits;
+ const position = Math.floor(offset / 8);
+ const remainder = offset % 8;
+ let value = (remainder + bits) <= 8 ?
+ littleEndian ? this.getUint8(position) >> remainder /* TODO */ : this.getUint8(position) >> (8 - remainder - bits) :
+ littleEndian ? this.getUint16(position, true) >> remainder /* TODO */ : this.getUint16(position, false) >> (16 - remainder - bits);
+ value &= (1 << bits) - 1;
+ if (value & (1 << (bits - 1))) {
+ value -= 1 << bits;
+ }
+ return value;
+};
+
+DataView.prototype.getUint64 = DataView.prototype.getUint64 || function(byteOffset, littleEndian) {
+ return littleEndian ?
+ new base.Uint64(this.getUint32(byteOffset, true), this.getUint32(byteOffset + 4, true)) :
+ new base.Uint64(this.getUint32(byteOffset + 4, true), this.getUint32(byteOffset, true));
+};
+
+DataView.prototype.setUint64 = DataView.prototype.setUint64 || function(byteOffset, value, littleEndian) {
+ if (littleEndian) {
+ this.setUint32(byteOffset, value.low, true);
+ this.setUint32(byteOffset + 4, value.high, true);
+ } else {
+ this.setUint32(byteOffset + 4, value.low, false);
+ this.setUint32(byteOffset, value.high, false);
+ }
+};
+
+DataView.prototype.getUintBits = DataView.prototype.getUintBits || function(offset, bits, littleEndian) {
+ offset = offset * bits;
+ const position = Math.floor(offset / 8);
+ const remainder = offset % 8;
+ const value = (remainder + bits) <= 8 ?
+ littleEndian ? this.getUint8(position) >> remainder /* TODO */ : this.getUint8(position) >> (8 - remainder - bits) :
+ littleEndian ? this.getUint16(position, true) >> remainder /* TODO */ : this.getUint16(position, false) >> (16 - remainder - bits);
+ return value & ((1 << bits) - 1);
+};
+
+DataView.prototype.getComplex64 = DataView.prototype.getComplex64 || function(byteOffset, littleEndian) {
+ const real = littleEndian ? this.getFloat32(byteOffset, littleEndian) : this.getFloat32(byteOffset + 4, littleEndian);
+ const imaginary = littleEndian ? this.getFloat32(byteOffset + 4, littleEndian) : this.getFloat32(byteOffset, littleEndian);
+ return base.Complex64.create(real, imaginary);
+};
+
+DataView.prototype.setComplex64 = DataView.prototype.setComplex64 || function(byteOffset, value, littleEndian) {
+ if (littleEndian) {
+ this.setFloat32(byteOffset, value.real, littleEndian);
+ this.setFloat32(byteOffset + 4, value.imaginary, littleEndian);
+ } else {
+ this.setFloat32(byteOffset + 4, value.real, littleEndian);
+ this.setFloat32(byteOffset, value.imaginary, littleEndian);
+ }
+};
+
+DataView.prototype.getComplex128 = DataView.prototype.getComplex128 || function(byteOffset, littleEndian) {
+ const real = littleEndian ? this.getFloat64(byteOffset, littleEndian) : this.getFloat64(byteOffset + 8, littleEndian);
+ const imaginary = littleEndian ? this.getFloat64(byteOffset + 8, littleEndian) : this.getFloat64(byteOffset, littleEndian);
+ return base.Complex128.create(real, imaginary);
+};
+
+DataView.prototype.setComplex128 = DataView.prototype.setComplex128 || function(byteOffset, value, littleEndian) {
+ if (littleEndian) {
+ this.setFloat64(byteOffset, value.real, littleEndian);
+ this.setFloat64(byteOffset + 8, value.imaginary, littleEndian);
+ } else {
+ this.setFloat64(byteOffset + 8, value.real, littleEndian);
+ this.setFloat64(byteOffset, value.imaginary, littleEndian);
+ }
+};
+
+base.BinaryStream = class {
+
+ constructor(buffer) {
+ this._buffer = buffer;
+ this._length = buffer.length;
+ this._position = 0;
+ }
+
+ get position() {
+ return this._position;
+ }
+
+ get length() {
+ return this._length;
+ }
+
+ stream(length) {
+ const buffer = this.read(length);
+ return new base.BinaryStream(buffer.slice(0));
+ }
+
+ seek(position) {
+ this._position = position >= 0 ? position : this._length + position;
+ if (this._position > this._buffer.length) {
+ throw new Error(`Expected ${this._position - this._buffer.length} more bytes. The file might be corrupted. Unexpected end of file.`);
+ }
+ }
+
+ skip(offset) {
+ this._position += offset;
+ if (this._position > this._buffer.length) {
+ throw new Error(`Expected ${this._position - this._buffer.length} more bytes. The file might be corrupted. Unexpected end of file.`);
+ }
+ }
+
+ peek(length) {
+ if (this._position === 0 && length === undefined) {
+ return this._buffer;
+ }
+ const position = this._position;
+ this.skip(length !== undefined ? length : this._length - this._position);
+ const end = this._position;
+ this.seek(position);
+ return this._buffer.subarray(position, end);
+ }
+
+ read(length) {
+ if (this._position === 0 && length === undefined) {
+ this._position = this._length;
+ return this._buffer;
+ }
+ const position = this._position;
+ this.skip(length !== undefined ? length : this._length - this._position);
+ return this._buffer.subarray(position, this._position);
+ }
+
+ byte() {
+ const position = this._position;
+ this.skip(1);
+ return this._buffer[position];
+ }
+};
+
+base.BinaryReader = class {
+
+ constructor(data, littleEndian) {
+ this._buffer = data instanceof Uint8Array ? data : data.peek();
+ this._littleEndian = littleEndian !== false;
+ this._position = 0;
+ this._length = this._buffer.length;
+ this._view = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength);
+ }
+
+ get length() {
+ return this._length;
+ }
+
+ get position() {
+ return this._position;
+ }
+
+ seek(position) {
+ this._position = position >= 0 ? position : this._length + position;
+ if (this._position > this._length || this._position < 0) {
+ throw new Error(`Expected ${this._position - this._length} more bytes. The file might be corrupted. Unexpected end of file.`);
+ }
+ }
+
+ skip(offset) {
+ this._position += offset;
+ if (this._position > this._length) {
+ throw new Error(`Expected ${this._position - this._length} more bytes. The file might be corrupted. Unexpected end of file.`);
+ }
+ }
+
+ align(mod) {
+ if (this._position % mod != 0) {
+ this.skip(mod - (this._position % mod));
+ }
+ }
+
+ peek(length) {
+ if (this._position === 0 && length === undefined) {
+ return this._buffer;
+ }
+ const position = this._position;
+ this.skip(length !== undefined ? length : this._length - this._position);
+ const end = this._position;
+ this._position = position;
+ return this._buffer.slice(position, end);
+ }
+
+ read(length) {
+ if (this._position === 0 && length === undefined) {
+ this._position = this._length;
+ return this._buffer;
+ }
+ const position = this._position;
+ this.skip(length !== undefined ? length : this._length - this._position);
+ return this._buffer.slice(position, this._position);
+ }
+
+ byte() {
+ const position = this._position;
+ this.skip(1);
+ return this._buffer[position];
+ }
+
+ int8() {
+ const position = this._position;
+ this.skip(1);
+ return this._view.getInt8(position, this._littleEndian);
+ }
+
+ int16() {
+ const position = this._position;
+ this.skip(2);
+ return this._view.getInt16(position, this._littleEndian);
+ }
+
+ int32() {
+ const position = this._position;
+ this.skip(4);
+ return this._view.getInt32(position, this._littleEndian);
+ }
+
+ int64() {
+ const position = this._position;
+ this.skip(8);
+ return this._view.getInt64(position, this._littleEndian).toNumber();
+ }
+
+ uint16() {
+ const position = this._position;
+ this.skip(2);
+ return this._view.getUint16(position, this._littleEndian);
+ }
+
+ uint32() {
+ const position = this._position;
+ this.skip(4);
+ return this._view.getUint32(position, this._littleEndian);
+ }
+
+ uint64() {
+ const position = this._position;
+ this.skip(8);
+ const low = this._view.getUint32(position, this._littleEndian);
+ const high = this._view.getUint32(position + 4, this._littleEndian);
+ if (high === 0) {
+ return low;
+ }
+ const value = (high * 4294967296) + low;
+ if (Number.isSafeInteger(value)) {
+ return value;
+ }
+ throw new Error("Unsigned 64-bit value exceeds safe integer.");
+ }
+
+ float32() {
+ const position = this._position;
+ this.skip(4);
+ return this._view.getFloat32(position, this._littleEndian);
+ }
+
+ float64() {
+ const position = this._position;
+ this.skip(8);
+ return this._view.getFloat64(position, this._littleEndian);
+ }
+
+ string() {
+ const length = this.uint32();
+ const position = this._position;
+ this.skip(length);
+ const data = this._buffer.subarray(position, this._position);
+ this._decoder = this._decoder || new TextDecoder('utf-8');
+ return this._decoder.decode(data);
+ }
+
+ boolean() {
+ return this.byte() !== 0;
+ }
+};
+
+base.StreamReader = class {
+
+ constructor(stream, littleEndian) {
+ this._stream = stream;
+ this._littleEndian = littleEndian !== false;
+ this._buffer = new Uint8Array(8);
+ this._view = new DataView(this._buffer.buffer, this._buffer.byteOffset, this._buffer.byteLength);
+ }
+
+ get position() {
+ return this._stream.position;
+ }
+
+ get length() {
+ return this._stream.length;
+ }
+
+ seek(position) {
+ this._stream.seek(position);
+ }
+
+ skip(position) {
+ this._stream.skip(position);
+ }
+
+ stream(length) {
+ return this._stream.stream(length);
+ }
+
+ read(length) {
+ return this._stream.read(length);
+ }
+
+ byte() {
+ return this._stream.byte();
+ }
+
+ int16() {
+ const buffer = this._stream.read(2);
+ this._buffer.set(buffer, 0);
+ return this._view.getInt16(0, this._littleEndian);
+ }
+
+ int32() {
+ const buffer = this._stream.read(4);
+ this._buffer.set(buffer, 0);
+ return this._view.getInt32(0, this._littleEndian);
+ }
+
+ uint16() {
+ const buffer = this._stream.read(2);
+ this._buffer.set(buffer, 0);
+ return this._view.getUint16(0, this._littleEndian);
+ }
+
+ uint32() {
+ const buffer = this._stream.read(4);
+ this._buffer.set(buffer, 0);
+ return this._view.getUint32(0, this._littleEndian);
+ }
+
+ uint64() {
+ const low = this.uint32();
+ const high = this.uint32();
+ if (high === 0) {
+ return low;
+ }
+ const value = (high * 4294967296) + low;
+ if (Number.isSafeInteger(value)) {
+ return value;
+ }
+ throw new Error("Unsigned 64-bit value exceeds safe integer.");
+ }
+
+ float32() {
+ const buffer = this._stream.read(4);
+ this._buffer.set(buffer, 0);
+ return this._view.getFloat32(0, this._littleEndian);
+ }
+};
+
+base.Telemetry = class {
+
+ constructor(window) {
+ this._window = window;
+ this._navigator = window.navigator;
+ this._config = new Map();
+ this._metadata = {};
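+ // Maps readable parameter names to the short query keys used by the
+ // Google Analytics 4 Measurement Protocol (e.g. 'client_id' -> 'cid').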
+ this._schema = new Map([
+ [ 'protocol_version', 'v' ],
+ [ 'tracking_id', 'tid' ],
+ [ 'hash_info', 'gtm' ],
+ [ '_page_id', '_p'],
+ [ 'client_id', 'cid' ],
+ [ 'language', 'ul' ],
+ [ 'screen_resolution', 'sr' ],
+ [ '_user_agent_architecture', 'uaa' ],
+ [ '_user_agent_bitness', 'uab' ],
+ [ '_user_agent_full_version_list', 'uafvl' ],
+ [ '_user_agent_mobile', 'uamb' ],
+ [ '_user_agent_model', 'uam' ],
+ [ '_user_agent_platform', 'uap' ],
+ [ '_user_agent_platform_version', 'uapv' ],
+ [ '_user_agent_wow64', 'uaw' ],
+ [ 'hit_count', '_s' ],
+ [ 'session_id', 'sid' ],
+ [ 'session_number', 'sct' ],
+ [ 'session_engaged', 'seg' ],
+ [ 'engagement_time_msec', '_et' ],
+ [ 'page_location', 'dl' ],
+ [ 'page_title', 'dt' ],
+ [ 'page_referrer', 'dr' ],
+ [ 'is_first_visit', '_fv' ],
+ [ 'is_external_event', '_ee' ],
+ [ 'is_new_to_site', '_nsi' ],
+ [ 'is_session_start', '_ss' ],
+ [ 'event_name', 'en' ]
+ ]);
+ }
+
+ async start(measurement_id, client_id, session) {
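+ // The session cookie value has the form 'GS1.1.<id>.<number>.<engaged>...';
+ // strip the prefix and split it into its dot-separated fields.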
+ this._session = session && typeof session === 'string' ? session.replace(/^GS1\.1\./, '').split('.') : null;
+ this._session = Array.isArray(this._session) && this._session.length >= 7 ? this._session : [ '0', '0', '0', '0', '0', '0', '0' ];
+ this._session[0] = Date.now();
+ this._session[1] = parseInt(this._session[1], 10) + 1;
+ this._engagement_time_msec = 0;
+ if (this._config.size > 0) {
+ throw new Error('Invalid session state.');
+ }
+ this.set('protocol_version', 2);
+ this.set('tracking_id', measurement_id);
+ this.set('hash_info', '2oebu0');
+ this.set('_page_id', Math.floor(Math.random() * 2147483648));
+ client_id = client_id ? client_id.replace(/^(GA1\.\d\.)*/, '') : null;
+ if (client_id && client_id.indexOf('.') !== -1) {
+ this.set('client_id', client_id);
+ } else {
+ const random = String(Math.round(0x7FFFFFFF * Math.random()));
+ const time = Date.now();
+ const value = [ random, Math.round(time / 1e3) ].join('.');
+ this.set('client_id', value);
+ this._metadata.is_first_visit = 1;
+ this._metadata.is_new_to_site = 1;
+ }
+ this.set('language', ((this._navigator && (this._navigator.language || this._navigator.browserLanguage)) || '').toLowerCase());
+ this.set('screen_resolution', `${window.screen ? window.screen.width : 0}x${window.screen ? window.screen.height : 0}`);
+ if (this._navigator && this._navigator.userAgentData && this._navigator.userAgentData.getHighEntropyValues) {
+ const values = await this._navigator.userAgentData.getHighEntropyValues([ 'platform', 'platformVersion', 'architecture', 'model', 'uaFullVersion', 'bitness', 'fullVersionList', 'wow64' ]);
+ if (values) {
+ this.set('_user_agent_architecture', values.architecture);
+ this.set('_user_agent_bitness', values.bitness);
+ this.set('_user_agent_full_version_list', Array.isArray(values.fullVersionList) ? values.fullVersionList.map((h) => `${encodeURIComponent(h.brand || '')};${encodeURIComponent(h.version || '')}`).join('|') : '');
+ this.set('_user_agent_mobile', values.mobile ? 1 : 0);
+ this.set('_user_agent_model', values.model);
+ this.set('_user_agent_platform', values.platform);
+ this.set('_user_agent_platform_version', values.platformVersion);
+ this.set('_user_agent_wow64', values.wow64 ? 1 : 0);
+ }
+ }
+ this.set('hit_count', 1);
+ this.set('session_id', this._session[0]);
+ this.set('session_number', this._session[1]);
+ this.set('session_engaged', 0);
+ this._metadata.is_session_start = 1;
+ this._metadata.is_external_event = 1;
+ window.addEventListener('focus', () => this._update(true, undefined, undefined));
+ window.addEventListener('blur', () => this._update(false, undefined, undefined));
+ window.addEventListener('pageshow', () => this._update(undefined, true, undefined));
+ window.addEventListener('pagehide', () => this._update(undefined, false, undefined));
+ window.addEventListener('visibilitychange', () => this._update(undefined, undefined, window.document.visibilityState !== 'hidden'));
+ window.addEventListener('beforeunload', () => this._update() && this.send('user_engagement', {}));
+ }
+
+ get session() {
+ return this._session ? this._session.join('.') : null;
+ }
+
+ set(name, value) {
+ const key = this._schema.get(name);
+ if (value !== undefined && value !== null) {
+ this._config.set(key, value);
+ } else if (this._config.has(key)) {
+ this._config.delete(key);
+ }
+ this._cache = null;
+ }
+
+ get(name) {
+ const key = this._schema.get(name);
+ return this._config.get(key);
+ }
+
+ send(name, params) {
+ if (this._session) {
+ try {
+ params = Object.assign({ event_name: name }, this._metadata, /* { debug_mode: true },*/ params);
+ this._metadata = {};
+ if (this._update()) {
+ params.engagement_time_msec = this._engagement_time_msec;
+ this._engagement_time_msec = 0;
+ }
+ const build = (entries) => entries.map(([name, value]) => `${name}=${encodeURIComponent(value)}`).join('&');
+ this._cache = this._cache || build(Array.from(this._config));
+ const key = (name, value) => this._schema.get(name) || ('number' === typeof value && !isNaN(value) ? 'epn.' : 'ep.') + name;
+ const body = build(Object.entries(params).map(([name, value]) => [ key(name, value), value ]));
+ const url = `https://analytics.google.com/g/collect?${this._cache}`;
+ this._navigator.sendBeacon(url, body);
+ this._session[2] = this.get('session_engaged') || '0';
+ this.set('hit_count', this.get('hit_count') + 1);
+ } catch (e) {
+ // continue regardless of error
+ }
+ }
+ }
+
+ _update(focused, page, visible) {
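+ // Accumulate engagement time only while the page is focused, shown and
+ // visible; any 'false' state pauses the clock until the next event.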
+ this._focused = focused === true || focused === false ? focused : this._focused;
+ this._page = page === true || page === false ? page : this._page;
+ this._visible = visible === true || visible === false ? visible : this._visible;
+ const time = Date.now();
+ if (this._start_time) {
+ this._engagement_time_msec += (time - this._start_time);
+ this._start_time = 0;
+ }
+ if (this._focused !== false && this._page !== false && this._visible !== false) {
+ this._start_time = time;
+ }
+ return this._engagement_time_msec > 20;
+ }
+};
+
+base.Metadata = class {
+
+ get extensions() {
+ return [
+ 'onnx', 'tflite', 'pb', 'pt', 'pt2', 'pth', 'h5', 'pbtxt', 'prototxt', 'caffemodel', 'mlmodel', 'mlpackage',
+ 'model', 'json', 'xml', 'cfg', 'weights', 'bin',
+ 'ort',
+ 'dnn', 'cmf',
+ 'gguf',
+ 'hd5', 'hdf5', 'keras',
+ 'tfl', 'circle', 'lite',
+ 'mlnet', 'mar', 'maxviz', 'meta', 'nn', 'ngf', 'hn', 'har',
+ 'param', 'params',
+ 'paddle', 'pdiparams', 'pdmodel', 'pdopt', 'pdparams', 'nb',
+ 'pkl', 'joblib', 'safetensors',
+ 'ptl', 't7',
+ 'dlc', 'uff', 'armnn',
+ 'mnn', 'ms', 'ncnn', 'om', 'tm', 'mge', 'tmfile', 'tnnproto', 'xmodel', 'kmodel', 'rknn',
+ 'tar', 'zip'
+ ];
+ }
+};
+
+if (typeof window !== 'undefined' && typeof window.Long !== 'undefined') {
+ window.long = { Long: window.Long };
+ window.Int64 = base.Int64;
+ window.Uint64 = base.Uint64;
+}
+
+export const Int64 = base.Int64;
+export const Uint64 = base.Uint64;
+export const Complex64 = base.Complex64;
+export const Complex128 = base.Complex128;
+export const BinaryStream = base.BinaryStream;
+export const BinaryReader = base.BinaryReader;
+export const StreamReader = base.StreamReader;
+export const Telemetry = base.Telemetry;
+export const Metadata = base.Metadata;
diff --git a/bigdl-metadata.json b/bigdl-metadata.json
new file mode 100644
index 00000000000..d42b79c7e68
--- /dev/null
+++ b/bigdl-metadata.json
@@ -0,0 +1,95 @@
+[
+ {
+ "name": "com.intel.analytics.bigdl.nn.Dropout",
+ "category": "Dropout"
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.InferReshape",
+ "category": "Shape"
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.JoinTable",
+ "category": "Tensor",
+ "inputs": [
+ { "name": "inputs", "list": true }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.Linear",
+ "category": "Layer",
+ "inputs": [
+ { "name": "inputs" },
+ { "name": "weight" },
+ { "name": "bias" }
+ ]
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.NormalizeScale",
+ "category": "Normalization",
+ "inputs": [
+ { "name": "inputs" },
+ { "name": "w" }
+ ]
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.ReLU",
+ "category": "Activation"
+ },
+ {
+ "name": "Scale",
+ "category": "Layer",
+ "inputs": [
+ { "name": "inputs" },
+ { "name": "weight" },
+ { "name": "bias" }
+ ]
+ },
+ {
+ "name": "SoftMax",
+ "category": "Activation"
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.SpatialAveragePooling",
+ "category": "Pool"
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.SpatialBatchNormalization",
+ "category": "Normalization"
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.quantized.SpatialConvolution",
+ "category": "Layer",
+ "inputs": [
+ { "name": "inputs" },
+ { "name": "weight" },
+ { "name": "bias" }
+ ]
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.SpatialCrossMapLRN",
+ "category": "Normalization"
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.SpatialDilatedConvolution",
+ "category": "Layer",
+ "inputs": [
+ { "name": "inputs" },
+ { "name": "weight" },
+ { "name": "bias" }
+ ]
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.SpatialMaxPooling",
+ "category": "Pool"
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.Transpose",
+ "category": "Shape"
+ },
+ {
+ "name": "com.intel.analytics.bigdl.nn.View"
+ }
+]
\ No newline at end of file
diff --git a/bigdl-proto.js b/bigdl-proto.js
new file mode 100644
index 00000000000..c8410c7af60
--- /dev/null
+++ b/bigdl-proto.js
@@ -0,0 +1,641 @@
+
+import * as protobuf from './protobuf.js';
+
+const $root = protobuf.get('bigdl');
+
+$root.com = {};
+
+$root.com.intel = {};
+
+$root.com.intel.analytics = {};
+
+$root.com.intel.analytics.bigdl = {};
+
+$root.com.intel.analytics.bigdl.serialization = {};
+
+$root.com.intel.analytics.bigdl.serialization.BigDLModule = class BigDLModule {
+
+ constructor() {
+ this.subModules = [];
+ this.preModules = [];
+ this.nextModules = [];
+ this.attr = {};
+ this.parameters = [];
+ this.inputScales = [];
+ this.outputScales = [];
+ this.weightScales = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.BigDLModule();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
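+ // Protobuf tag: field number in the upper bits, wire type in the low 3 bits.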
+ switch (tag >>> 3) {
+ case 1:
+ message.name = reader.string();
+ break;
+ case 2:
+ message.subModules.push($root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32()));
+ break;
+ case 3:
+ message.weight = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32());
+ break;
+ case 4:
+ message.bias = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32());
+ break;
+ case 5:
+ message.preModules.push(reader.string());
+ break;
+ case 6:
+ message.nextModules.push(reader.string());
+ break;
+ case 7:
+ message.moduleType = reader.string();
+ break;
+ case 8:
+ reader.entry(message.attr, () => reader.string(), () => $root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32()));
+ break;
+ case 9:
+ message.version = reader.string();
+ break;
+ case 10:
+ message.train = reader.bool();
+ break;
+ case 11:
+ message.namePostfix = reader.string();
+ break;
+ case 12:
+ message.id = reader.int32();
+ break;
+ case 13:
+ message.inputShape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32());
+ break;
+ case 14:
+ message.outputShape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32());
+ break;
+ case 15:
+ message.hasParameters = reader.bool();
+ break;
+ case 16:
+ message.parameters.push($root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32()));
+ break;
+ case 17:
+ message.isMklInt8Enabled = reader.bool();
+ break;
+ case 18:
+ message.inputDimMasks = reader.int32();
+ break;
+ case 19:
+ message.inputScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32()));
+ break;
+ case 20:
+ message.outputDimMasks = reader.int32();
+ break;
+ case 21:
+ message.outputScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32()));
+ break;
+ case 22:
+ message.weightDimMasks = reader.int32();
+ break;
+ case 23:
+ message.weightScales.push($root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32()));
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.name = "";
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.weight = null;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.bias = null;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.moduleType = "";
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.version = "";
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.train = false;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.namePostfix = "";
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.id = 0;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.inputShape = null;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.outputShape = null;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.hasParameters = false;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.isMklInt8Enabled = false;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.inputDimMasks = 0;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.outputDimMasks = 0;
+$root.com.intel.analytics.bigdl.serialization.BigDLModule.prototype.weightDimMasks = 0;
+
+$root.com.intel.analytics.bigdl.serialization.VarFormat = {
+ "EMPTY_FORMAT": 0,
+ "DEFAULT": 1,
+ "ONE_D": 2,
+ "IN_OUT": 3,
+ "OUT_IN": 4,
+ "IN_OUT_KW_KH": 5,
+ "OUT_IN_KW_KH": 6,
+ "GP_OUT_IN_KW_KH": 7,
+ "GP_IN_OUT_KW_KH": 8,
+ "OUT_IN_KT_KH_KW": 9
+};
+
+$root.com.intel.analytics.bigdl.serialization.InitMethodType = {
+ "EMPTY_INITIALIZATION": 0,
+ "RANDOM_UNIFORM": 1,
+ "RANDOM_UNIFORM_PARAM": 2,
+ "RANDOM_NORMAL": 3,
+ "ZEROS": 4,
+ "ONES": 5,
+ "CONST": 6,
+ "XAVIER": 7,
+ "BILINEARFILLER": 8
+};
+
+$root.com.intel.analytics.bigdl.serialization.RegularizerType = {
+ "L1L2Regularizer": 0,
+ "L1Regularizer": 1,
+ "L2Regularizer": 2
+};
+
+$root.com.intel.analytics.bigdl.serialization.InputDataFormat = {
+ "NCHW": 0,
+ "NHWC": 1
+};
+
+$root.com.intel.analytics.bigdl.serialization.TensorType = {
+ "DENSE": 0,
+ "QUANT": 1
+};
+
+$root.com.intel.analytics.bigdl.serialization.InitMethod = class InitMethod {
+
+ constructor() {
+ this.data = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.InitMethod();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.methodType = reader.int32();
+ break;
+ case 2:
+ message.data = reader.doubles(message.data, tag);
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.InitMethod.prototype.methodType = 0;
+
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor = class BigDLTensor {
+
+ constructor() {
+ this.size = [];
+ this.stride = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.BigDLTensor();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.datatype = reader.int32();
+ break;
+ case 2:
+ message.size = reader.array(message.size, () => reader.int32(), tag);
+ break;
+ case 3:
+ message.stride = reader.array(message.stride, () => reader.int32(), tag);
+ break;
+ case 4:
+ message.offset = reader.int32();
+ break;
+ case 5:
+ message.dimension = reader.int32();
+ break;
+ case 6:
+ message.nElements = reader.int32();
+ break;
+ case 7:
+ message.isScalar = reader.bool();
+ break;
+ case 8:
+ message.storage = $root.com.intel.analytics.bigdl.serialization.TensorStorage.decode(reader, reader.uint32());
+ break;
+ case 9:
+ message.id = reader.int32();
+ break;
+ case 10:
+ message.tensorType = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.datatype = 0;
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.offset = 0;
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.dimension = 0;
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.nElements = 0;
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.isScalar = false;
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.storage = null;
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.id = 0;
+$root.com.intel.analytics.bigdl.serialization.BigDLTensor.prototype.tensorType = 0;
+
+$root.com.intel.analytics.bigdl.serialization.TensorStorage = class TensorStorage {
+
+ constructor() {
+ this.float_data = [];
+ this.double_data = [];
+ this.bool_data = [];
+ this.string_data = [];
+ this.int_data = [];
+ this.long_data = [];
+ this.bytes_data = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.TensorStorage();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.datatype = reader.int32();
+ break;
+ case 2:
+ message.float_data = reader.floats(message.float_data, tag);
+ break;
+ case 3:
+ message.double_data = reader.doubles(message.double_data, tag);
+ break;
+ case 4:
+ message.bool_data = reader.array(message.bool_data, () => reader.bool(), tag);
+ break;
+ case 5:
+ message.string_data.push(reader.string());
+ break;
+ case 6:
+ message.int_data = reader.array(message.int_data, () => reader.int32(), tag);
+ break;
+ case 7:
+ message.long_data = reader.array(message.long_data, () => reader.int64(), tag);
+ break;
+ case 8:
+ message.bytes_data.push(reader.bytes());
+ break;
+ case 9:
+ message.id = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.TensorStorage.prototype.datatype = 0;
+$root.com.intel.analytics.bigdl.serialization.TensorStorage.prototype.id = 0;
+
+$root.com.intel.analytics.bigdl.serialization.Regularizer = class Regularizer {
+
+ constructor() {
+ this.regularData = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.Regularizer();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.regularizerType = reader.int32();
+ break;
+ case 2:
+ message.regularData = reader.doubles(message.regularData, tag);
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.Regularizer.prototype.regularizerType = 0;
+
+$root.com.intel.analytics.bigdl.serialization.DataType = {
+ "INT32": 0,
+ "INT64": 1,
+ "FLOAT": 2,
+ "DOUBLE": 3,
+ "STRING": 4,
+ "BOOL": 5,
+ "CHAR": 6,
+ "SHORT": 7,
+ "BYTES": 8,
+ "REGULARIZER": 9,
+ "TENSOR": 10,
+ "VARIABLE_FORMAT": 11,
+ "INITMETHOD": 12,
+ "MODULE": 13,
+ "NAME_ATTR_LIST": 14,
+ "ARRAY_VALUE": 15,
+ "DATA_FORMAT": 16,
+ "CUSTOM": 17,
+ "SHAPE": 18
+};
+
+$root.com.intel.analytics.bigdl.serialization.AttrValue = class AttrValue {
+
+ constructor() {
+ }
+
+ get value() {
+ $root.com.intel.analytics.bigdl.serialization.AttrValue.valueSet = $root.com.intel.analytics.bigdl.serialization.AttrValue.valueSet || new Set([ "int32Value", "int64Value", "floatValue", "doubleValue", "stringValue", "boolValue", "regularizerValue", "tensorValue", "variableFormatValue", "initMethodValue", "bigDLModuleValue", "nameAttrListValue", "arrayValue", "dataFormatValue", "customValue", "shape"]);
+ return Object.keys(this).find((key) => $root.com.intel.analytics.bigdl.serialization.AttrValue.valueSet.has(key) && this[key] != null);
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.AttrValue();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.dataType = reader.int32();
+ break;
+ case 2:
+ message.subType = reader.string();
+ break;
+ case 3:
+ message.int32Value = reader.int32();
+ break;
+ case 4:
+ message.int64Value = reader.int64();
+ break;
+ case 5:
+ message.floatValue = reader.float();
+ break;
+ case 6:
+ message.doubleValue = reader.double();
+ break;
+ case 7:
+ message.stringValue = reader.string();
+ break;
+ case 8:
+ message.boolValue = reader.bool();
+ break;
+ case 9:
+ message.regularizerValue = $root.com.intel.analytics.bigdl.serialization.Regularizer.decode(reader, reader.uint32());
+ break;
+ case 10:
+ message.tensorValue = $root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32());
+ break;
+ case 11:
+ message.variableFormatValue = reader.int32();
+ break;
+ case 12:
+ message.initMethodValue = $root.com.intel.analytics.bigdl.serialization.InitMethod.decode(reader, reader.uint32());
+ break;
+ case 13:
+ message.bigDLModuleValue = $root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32());
+ break;
+ case 14:
+ message.nameAttrListValue = $root.com.intel.analytics.bigdl.serialization.NameAttrList.decode(reader, reader.uint32());
+ break;
+ case 15:
+ message.arrayValue = $root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue.decode(reader, reader.uint32());
+ break;
+ case 16:
+ message.dataFormatValue = reader.int32();
+ break;
+ case 17:
+ message.customValue = $root.google.protobuf.Any.decode(reader, reader.uint32());
+ break;
+ case 18:
+ message.shape = $root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.AttrValue.prototype.dataType = 0;
+$root.com.intel.analytics.bigdl.serialization.AttrValue.prototype.subType = "";
+
+$root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue = class ArrayValue {
+
+ constructor() {
+ this.i32 = [];
+ this.i64 = [];
+ this.flt = [];
+ this.dbl = [];
+ this.str = [];
+ this.boolean = [];
+ this.Regularizer = [];
+ this.tensor = [];
+ this.variableFormat = [];
+ this.initMethod = [];
+ this.bigDLModule = [];
+ this.nameAttrList = [];
+ this.dataFormat = [];
+ this.custom = [];
+ this.shape = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.size = reader.int32();
+ break;
+ case 2:
+ message.datatype = reader.int32();
+ break;
+ case 3:
+ message.i32 = reader.array(message.i32, () => reader.int32(), tag);
+ break;
+ case 4:
+ message.i64 = reader.array(message.i64, () => reader.int64(), tag);
+ break;
+ case 5:
+ message.flt = reader.floats(message.flt, tag);
+ break;
+ case 6:
+ message.dbl = reader.doubles(message.dbl, tag);
+ break;
+ case 7:
+ message.str.push(reader.string());
+ break;
+ case 8:
+ message.boolean = reader.array(message.boolean, () => reader.bool(), tag);
+ break;
+ case 9:
+ message.Regularizer.push($root.com.intel.analytics.bigdl.serialization.Regularizer.decode(reader, reader.uint32()));
+ break;
+ case 10:
+ message.tensor.push($root.com.intel.analytics.bigdl.serialization.BigDLTensor.decode(reader, reader.uint32()));
+ break;
+ case 11:
+ message.variableFormat = reader.array(message.variableFormat, () => reader.int32(), tag);
+ break;
+ case 12:
+ message.initMethod.push($root.com.intel.analytics.bigdl.serialization.InitMethod.decode(reader, reader.uint32()));
+ break;
+ case 13:
+ message.bigDLModule.push($root.com.intel.analytics.bigdl.serialization.BigDLModule.decode(reader, reader.uint32()));
+ break;
+ case 14:
+ message.nameAttrList.push($root.com.intel.analytics.bigdl.serialization.NameAttrList.decode(reader, reader.uint32()));
+ break;
+ case 15:
+ message.dataFormat = reader.array(message.dataFormat, () => reader.int32(), tag);
+ break;
+ case 16:
+ message.custom.push($root.google.protobuf.Any.decode(reader, reader.uint32()));
+ break;
+ case 17:
+ message.shape.push($root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32()));
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue.prototype.size = 0;
+$root.com.intel.analytics.bigdl.serialization.AttrValue.ArrayValue.prototype.datatype = 0;
+
+$root.com.intel.analytics.bigdl.serialization.NameAttrList = class NameAttrList {
+
+ constructor() {
+ this.attr = {};
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.NameAttrList();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.name = reader.string();
+ break;
+ case 2:
+ reader.entry(message.attr, () => reader.string(), () => $root.com.intel.analytics.bigdl.serialization.AttrValue.decode(reader, reader.uint32()));
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.NameAttrList.prototype.name = "";
+
+$root.com.intel.analytics.bigdl.serialization.Shape = class Shape {
+
+ constructor() {
+ this.shapeValue = [];
+ this.shape = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.com.intel.analytics.bigdl.serialization.Shape();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.shapeType = reader.int32();
+ break;
+ case 2:
+ message.ssize = reader.int32();
+ break;
+ case 3:
+ message.shapeValue = reader.array(message.shapeValue, () => reader.int32(), tag);
+ break;
+ case 4:
+ message.shape.push($root.com.intel.analytics.bigdl.serialization.Shape.decode(reader, reader.uint32()));
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.com.intel.analytics.bigdl.serialization.Shape.prototype.shapeType = 0;
+$root.com.intel.analytics.bigdl.serialization.Shape.prototype.ssize = 0;
+
+$root.com.intel.analytics.bigdl.serialization.Shape.ShapeType = {
+ "SINGLE": 0,
+ "MULTI": 1
+};
+
+$root.google = {};
+
+$root.google.protobuf = {};
+
+$root.google.protobuf.Any = class Any {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.google.protobuf.Any();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.type_url = reader.string();
+ break;
+ case 2:
+ message.value = reader.bytes();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.google.protobuf.Any.prototype.type_url = "";
+$root.google.protobuf.Any.prototype.value = new Uint8Array([]);
diff --git a/bigdl.js b/bigdl.js
new file mode 100644
index 00000000000..e89fcdc5825
--- /dev/null
+++ b/bigdl.js
@@ -0,0 +1,307 @@
+
+// Experimental
+
+import * as protobuf from './protobuf.js';
+
+const bigdl = {};
+
+bigdl.ModelFactory = class {
+
+ match(context) {
+ const tags = context.tags('pb');
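+ // Heuristic: a BigDLModule message defines fields 2 (subModules), 7 (moduleType),
+ // 8 (attr), 9 (version), 10 (train), 11 (namePostfix) and 12 (id), so their
+ // combined presence identifies the format.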
+ if (tags.has(2) && tags.has(7) && tags.has(8) && tags.has(9) && tags.has(10) && tags.has(11) && tags.has(12)) {
+ return 'bigdl';
+ }
+ return '';
+ }
+
+ async open(context) {
+ await context.require('./bigdl-proto');
+ let module = null;
+ try {
+ // https://github.com/intel-analytics/BigDL/blob/master/spark/dl/src/main/resources/serialization/bigdl.proto
+ bigdl.proto = protobuf.get('bigdl').com.intel.analytics.bigdl.serialization;
+ const stream = context.stream;
+ const reader = protobuf.BinaryReader.open(stream);
+ module = bigdl.proto.BigDLModule.decode(reader);
+ } catch (error) {
+ const message = error && error.message ? error.message : error.toString();
+ throw new bigdl.Error(`File format is not bigdl.BigDLModule (${message.replace(/\.$/, '')}).`);
+ }
+ const metadata = await context.metadata('bigdl-metadata.json');
+ return new bigdl.Model(metadata, module);
+ }
+};
+
+bigdl.Model = class {
+
+ constructor(metadata, module) {
+ const version = module && module.version ? module.version : '';
+ this.format = `BigDL${version ? ` v${version}` : ''}`;
+ this.graphs = [ new bigdl.Graph(metadata, module) ];
+ }
+};
+
+bigdl.Graph = class {
+
+ constructor(metadata, module) {
+ this.type = module.moduleType;
+ this.inputs = [];
+ this.outputs = [];
+ this.nodes = [];
+ const tensors = module.attr && module.attr.global_storage && module.attr.global_storage.nameAttrListValue && module.attr.global_storage.nameAttrListValue.attr ? module.attr.global_storage.nameAttrListValue.attr : {};
+ const values = new Map();
+ values.map = (name) => {
+ if (!values.has(name)) {
+ values.set(name, new bigdl.Value(name));
+ }
+ return values.get(name);
+ };
+ const loadModule = (metadata, module, tensors) => {
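+ // Container modules are flattened into the graph; Input modules become
+ // graph inputs and everything else becomes a node.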
+ switch (module.moduleType) {
+ case 'com.intel.analytics.bigdl.nn.StaticGraph':
+ case 'com.intel.analytics.bigdl.nn.Sequential': {
+ for (const submodule of module.subModules) {
+ loadModule(metadata, submodule, tensors);
+ }
+ break;
+ }
+ case 'com.intel.analytics.bigdl.nn.Input': {
+ const argument = new bigdl.Argument(module.name, [ values.map(module.name) ]);
+ this.inputs.push(argument);
+ break;
+ }
+ default: {
+ const node = new bigdl.Node(metadata, module, tensors, values);
+ this.nodes.push(node);
+ break;
+ }
+ }
+ };
+ loadModule(metadata, module, tensors);
+ }
+};
+
+bigdl.Argument = class {
+
+ constructor(name, value) {
+ this.name = name;
+ this.value = value;
+ }
+};
+
+bigdl.Value = class {
+
+ constructor(name, type, initializer) {
+ if (typeof name !== 'string') {
+ throw new bigdl.Error(`Invalid value identifier '${JSON.stringify(name)}'.`);
+ }
+ this.name = name;
+ this.type = type ? type : initializer ? initializer.type : null;
+ this.initializer = initializer;
+ }
+};
+
+bigdl.Node = class {
+
+ constructor(metadata, module, tensors, values) {
+ const type = module.moduleType;
+ this.name = module.name;
+ this.attributes = [];
+ this.inputs = [];
+ this.outputs = [];
+ this.inputs.push(new bigdl.Argument('input', module.preModules.map((id) => values.map(id))));
+ this.type = metadata.type(type) || { name: type };
+ const inputs = this.type && this.type.inputs ? this.type.inputs.slice() : [];
+ inputs.shift();
+ if (module.weight) {
+ inputs.shift();
+ this.inputs.push(new bigdl.Argument('weight', [
+ new bigdl.Value('', null, new bigdl.Tensor(module.weight, tensors))
+ ]));
+ }
+ if (module.bias) {
+ inputs.shift();
+ this.inputs.push(new bigdl.Argument('bias', [
+ new bigdl.Value('', null, new bigdl.Tensor(module.bias, tensors))
+ ]));
+ }
+ if (module.parameters && module.parameters.length > 0) {
+ for (const parameter of module.parameters) {
+ const input = inputs.shift();
+ const inputName = input ? input.name : this.inputs.length.toString();
+ this.inputs.push(new bigdl.Argument(inputName, [
+ new bigdl.Value('', null, new bigdl.Tensor(parameter, tensors))
+ ]));
+ }
+ }
+ for (const [key, value] of Object.entries(module.attr)) {
+ if (key === 'module_numerics' || key === 'module_tags') {
+ continue;
+ }
+ if (value.dataType === bigdl.proto.DataType.TENSOR) {
+ if (value.value) {
+ this.inputs.push(new bigdl.Argument(key, [ new bigdl.Value('', null, new bigdl.Tensor(value.tensorValue, tensors)) ]));
+ }
+ continue;
+ }
+ if (value.dataType === bigdl.proto.DataType.REGULARIZER && value.value === undefined) {
+ continue;
+ }
+ if (value.dataType === bigdl.proto.DataType.ARRAY_VALUE && value.arrayValue.datatype === bigdl.proto.DataType.TENSOR) {
+ this.inputs.push(new bigdl.Argument(key, value.arrayValue.tensor.map((tensor) => new bigdl.Value('', null, new bigdl.Tensor(tensor, tensors)))));
+ continue;
+ }
+ this.attributes.push(new bigdl.Attribute(key, value));
+ }
+ const output = this.name || type + module.namePostfix; // this.type may be a metadata object; use the moduleType string
+ this.outputs.push(new bigdl.Argument('output', [ values.map(output) ]));
+ }
+};
+
+bigdl.Attribute = class {
+
+ constructor(name, value) {
+ this.name = name;
+ switch (value.dataType) {
+ case bigdl.proto.DataType.INT32: {
+ this.type = 'int32';
+ this.value = value.int32Value;
+ break;
+ }
+ case bigdl.proto.DataType.FLOAT: {
+ this.type = 'float32';
+ this.value = value.floatValue;
+ break;
+ }
+ case bigdl.proto.DataType.DOUBLE: {
+ this.type = 'float64';
+ this.value = value.doubleValue;
+ break;
+ }
+ case bigdl.proto.DataType.BOOL: {
+ this.type = 'boolean';
+ this.value = value.boolValue;
+ break;
+ }
+ case bigdl.proto.DataType.REGULARIZER: {
+ this.value = value.regularizerValue;
+ break;
+ }
+ case bigdl.proto.DataType.MODULE: {
+ this.value = value.bigDLModuleValue;
+ break;
+ }
+ case bigdl.proto.DataType.NAME_ATTR_LIST: {
+ this.value = value.nameAttrListValue;
+ break;
+ }
+ case bigdl.proto.DataType.ARRAY_VALUE: {
+ switch (value.arrayValue.datatype) {
+ case bigdl.proto.DataType.INT32: {
+ this.type = 'int32[]';
+ this.value = value.arrayValue.i32;
+ break;
+ }
+ case bigdl.proto.DataType.FLOAT: {
+ this.type = 'float32[]';
+ this.value = value.arrayValue.flt;
+ break;
+ }
+ case bigdl.proto.DataType.STRING: {
+ this.type = 'string[]';
+ this.value = value.arrayValue.str;
+ break;
+ }
+ case bigdl.proto.DataType.TENSOR: {
+ this.type = 'tensor[]';
+ this.value = value.arrayValue.tensor;
+ break;
+ }
+ default: {
+ throw new bigdl.Error(`Unsupported attribute array data type '${value.arrayValue.datatype}'.`);
+ }
+ }
+ break;
+ }
+ case bigdl.proto.DataType.DATA_FORMAT: {
+ switch (value.dataFormatValue) {
+ case 0: this.value = 'NCHW'; break;
+ case 1: this.value = 'NHWC'; break;
+ default: throw new bigdl.Error(`Unsupported data format '${value.dataFormatValue}'.`);
+ }
+ break;
+ }
+ default: {
+ throw new bigdl.Error(`Unsupported attribute data type '${value.dataType}'.`);
+ }
+ }
+ }
+};
+
+bigdl.Tensor = class {
+
+ constructor(tensor /*, tensors */) {
+ this.type = new bigdl.TensorType(tensor.datatype, new bigdl.TensorShape(tensor.size));
+ /*
+ if (tensor && tensor.id && tensors && tensors[tensor.id] && tensors[tensor.id].tensorValue && tensors[tensor.id].tensorValue.storage) {
+ const storage = tensors[tensor.id].tensorValue.storage;
+ switch (this.type.dataType) {
+ case 'float32':
+ if (storage.bytes_data && storage.bytes_data.length > 0) {
+ this.values = storage.bytes_data[0];
+ this.encoding = '<';
+ }
+ else if (storage.float_data && storage.float_data.length > 0) {
+ this.values = storage.float_data;
+ this.encoding = '|';
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ */
+ }
+};
+
+bigdl.TensorType = class {
+
+ constructor(dataType, shape) {
+ switch (dataType) {
+ case bigdl.proto.DataType.FLOAT: this.dataType = 'float32'; break;
+ case bigdl.proto.DataType.DOUBLE: this.dataType = 'float64'; break;
+ default: throw new bigdl.Error(`Unsupported tensor type '${dataType}'.`);
+ }
+ this.shape = shape;
+ }
+
+ toString() {
+ return (this.dataType || '?') + this.shape.toString();
+ }
+};
+
+bigdl.TensorShape = class {
+
+ constructor(dimensions) {
+ this.dimensions = dimensions;
+ if (!dimensions.every((dimension) => Number.isInteger(dimension))) {
+ throw new bigdl.Error(`Invalid tensor shape '${JSON.stringify(dimensions)}'.`);
+ }
+ }
+
+ toString() {
+ return this.dimensions ? (`[${this.dimensions.map((dimension) => dimension.toString()).join(',')}]`) : '';
+ }
+};
+
+bigdl.Error = class extends Error {
+
+ constructor(message) {
+ super(message);
+ this.name = 'Error loading BigDL model.';
+ }
+};
+
+export const ModelFactory = bigdl.ModelFactory;
+
diff --git a/browser.js b/browser.js
new file mode 100644
index 00000000000..7fd80d6e31f
--- /dev/null
+++ b/browser.js
@@ -0,0 +1,834 @@
+
+import * as base from './base.js';
+
+const host = {};
+
+host.BrowserHost = class {
+
+ constructor() {
+ this._window = window;
+ this._navigator = window.navigator;
+ this._document = window.document;
+ this._telemetry = new base.Telemetry(this._window);
+ this._window.eval = () => {
+ throw new Error('window.eval() not supported.');
+ };
+ this._meta = {};
+ for (const element of Array.from(this._document.getElementsByTagName('meta'))) {
+ if (element.name !== undefined && element.content !== undefined) {
+ this._meta[element.name] = this._meta[element.name] || [];
+ this._meta[element.name].push(element.content);
+ }
+ }
+ this._environment = {
+ name: this._document.title,
+ type: this._meta.type ? this._meta.type[0] : 'Browser',
+ version: this._meta.version ? this._meta.version[0] : null,
+ date: Array.isArray(this._meta.date) && this._meta.date.length > 0 && this._meta.date[0] ? new Date(`${this._meta.date[0].split(' ').join('T')}Z`) : new Date(),
+ packaged: this._meta.version && this._meta.version[0] !== '0.0.0',
+ platform: /(Mac|iPhone|iPod|iPad)/i.test(this._navigator.platform) ? 'darwin' : undefined,
+ agent: this._navigator.userAgent.toLowerCase().indexOf('safari') !== -1 && this._navigator.userAgent.toLowerCase().indexOf('chrome') === -1 ? 'safari' : '',
+ repository: this._element('logo-github').getAttribute('href'),
+ menu: true
+ };
+ if (!/^\d+\.\d+\.\d+$/.test(this.version)) {
+ throw new Error('Invalid version.');
+ }
+ }
+
+ get window() {
+ return this._window;
+ }
+
+ get document() {
+ return this._document;
+ }
+
+ get version() {
+ return this._environment.version;
+ }
+
+ get type() {
+ return this._environment.type;
+ }
+
+ async view(view) {
+ this._view = view;
+ const age = async () => {
+ const days = (new Date() - new Date(this._environment.date)) / (24 * 60 * 60 * 1000);
+ if (days > 180) {
+ this.document.body.classList.remove('spinner');
+ this.window.exports.terminate('Please update to the newest version.', 'Download', () => {
+ const link = this._element('logo-github').href;
+ this.openURL(link);
+ });
+ return new Promise(() => {});
+ }
+ return Promise.resolve();
+ };
+ const consent = async () => {
+ if (this._getCookie('consent') || this._getCookie('_ga')) {
+ return;
+ }
+ let consent = true;
+ try {
+ const text = await this._request('https://ipinfo.io/json', { 'Content-Type': 'application/json' }, 'utf-8', null, 2000);
+ const json = JSON.parse(text);
+ const countries = ['AT', 'BE', 'BG', 'HR', 'CZ', 'CY', 'DK', 'EE', 'FI', 'FR', 'DE', 'EL', 'HU', 'IE', 'IT', 'LV', 'LT', 'LU', 'MT', 'NL', 'NO', 'PL', 'PT', 'SK', 'ES', 'SE', 'GB', 'UK', 'GR', 'EU', 'RO'];
+ if (json && json.country && countries.indexOf(json.country) === -1) {
+ consent = false;
+ }
+ } catch (error) {
+ // continue regardless of error
+ }
+ if (consent) {
+ this.document.body.classList.remove('spinner');
+ await this._message('This app uses cookies to report errors and anonymous usage information.', 'Accept');
+ }
+ this._setCookie('consent', Date.now().toString(), 30);
+ };
+ const telemetry = async () => {
+ if (this._environment.packaged) {
+ this._window.addEventListener('error', (event) => {
+ const error = event instanceof ErrorEvent && event.error && event.error instanceof Error ? event.error : new Error(event && event.message ? event.message : JSON.stringify(event));
+ this.exception(error, true);
+ });
+ const measurement_id = '848W2NVWVH';
+ const user = this._getCookie('_ga').replace(/^(GA1\.\d\.)*/, '');
+ const session = this._getCookie(`_ga${measurement_id}`);
+ await this._telemetry.start(`G-${measurement_id}`, user, session);
+ this._telemetry.set('page_location', this._document.location && this._document.location.href ? this._document.location.href : null);
+ this._telemetry.set('page_title', this._document.title ? this._document.title : null);
+ this._telemetry.set('page_referrer', this._document.referrer ? this._document.referrer : null);
+ this._telemetry.send('page_view', {
+ app_name: this.type,
+ app_version: this.version,
+ });
+ this._telemetry.send('scroll', {
+ percent_scrolled: 90,
+ app_name: this.type,
+ app_version: this.version
+ });
+ this._setCookie('_ga', `GA1.2.${this._telemetry.get('client_id')}`, 1200);
+ this._setCookie(`_ga${measurement_id}`, `GS1.1.${this._telemetry.session}`, 1200);
+ }
+ };
+ const capabilities = async () => {
+ const filter = (list) => {
+ return list.filter((capability) => {
+ const path = capability.split('.').reverse();
+ let obj = this.window[path.pop()];
+ while (obj && path.length > 0) {
+ obj = obj[path.pop()];
+ }
+ return obj;
+ });
+ };
+ const capabilities = filter([ 'fetch', 'DataView.prototype.getBigInt64', 'Worker' ]);
+ this.event('browser_open', {
+ browser_capabilities: capabilities.map((capability) => capability.split('.').pop()).join(',')
+ });
+ return Promise.resolve();
+ };
+ await age();
+ await consent();
+ await telemetry();
+ await capabilities();
+ }
+
+ async start() {
+ const hash = this.window.location.hash ? this.window.location.hash.replace(/^#/, '') : '';
+ const search = this.window.location.search;
+ const params = new URLSearchParams(search + (hash ? `&${hash}` : ''));
+ if (this._meta.file && this._meta.identifier) {
+ const [url] = this._meta.file;
+ if (this._view.accept(url)) {
+ this._openModel(this._url(url), null);
+ this._document.title = this._meta.identifier;
+ return;
+ }
+ }
+ const url = params.get('url');
+ if (url) {
+ const identifier = params.get('identifier') || null;
+ const location = url
+ .replace(/^https:\/\/github\.com\/([\w-]*\/[\w-]*)\/blob\/([\w/\-_.]*)(\?raw=true)?$/, 'https://raw.githubusercontent.com/$1/$2')
+ .replace(/^https:\/\/github\.com\/([\w-]*\/[\w-]*)\/raw\/([\w/\-_.]*)$/, 'https://raw.githubusercontent.com/$1/$2')
+ .replace(/^https:\/\/huggingface.co\/(.*)\/blob\/(.*)$/, 'https://huggingface.co/$1/resolve/$2');
+ if (this._view.accept(identifier || location)) {
+ const title = await this._openModel(location, identifier);
+ if (title) {
+ this.document.title = title;
+ return;
+ }
+ }
+ }
+ const gist = params.get('gist');
+ if (gist) {
+ this._openGist(gist);
+ return;
+ }
+ const openFileButton = this._element('open-file-button');
+ const openFileDialog = this._element('open-file-dialog');
+ if (openFileButton && openFileDialog) {
+ openFileButton.addEventListener('click', () => {
+ this.execute('open');
+ });
+ const mobileSafari = this.environment('platform') === 'darwin' && this._navigator.maxTouchPoints && this._navigator.maxTouchPoints > 1;
+ if (!mobileSafari) {
+ const extensions = new base.Metadata().extensions.map((extension) => `.${extension}`);
+ openFileDialog.setAttribute('accept', extensions.join(', '));
+ }
+ openFileDialog.addEventListener('change', (e) => {
+ if (e.target && e.target.files && e.target.files.length > 0) {
+ const files = Array.from(e.target.files);
+ const file = files.find((file) => this._view.accept(file.name, file.size));
+ if (file) {
+ this._open(file, files);
+ }
+ }
+ });
+ }
+ this.document.addEventListener('dragover', (e) => {
+ e.preventDefault();
+ });
+ this.document.addEventListener('drop', (e) => {
+ e.preventDefault();
+ });
+ this.document.body.addEventListener('drop', (e) => {
+ e.preventDefault();
+ if (e.dataTransfer && e.dataTransfer.files && e.dataTransfer.files.length > 0) {
+ const files = Array.from(e.dataTransfer.files);
+ const file = files.find((file) => this._view.accept(file.name, file.size));
+ if (file) {
+ this._open(file, files);
+ }
+ }
+ });
+ this._view.show('welcome');
+ }
+
+ environment(name) {
+ return this._environment[name];
+ }
+
+ async error(message, detail /*, cancel */) {
+ alert((message === 'Error' ? '' : `${message} `) + detail);
+ return 0;
+ }
+
+ confirm(message, detail) {
+ return confirm(`${message} ${detail}`);
+ }
+
+ async require(id) {
+ return import(`${id}.js`);
+ }
+
+ save(name, extension, defaultPath, callback) {
+ callback(`${defaultPath}.${extension}`);
+ }
+
+ export(file, blob) {
+ const element = this.document.createElement('a');
+ element.download = file;
+ element.href = URL.createObjectURL(blob);
+ this.document.body.appendChild(element);
+ element.click();
+ this.document.body.removeChild(element);
+ }
+
+ execute(name /*, value */) {
+ switch (name) {
+ case 'open': {
+ const openFileDialog = this._element('open-file-dialog');
+ if (openFileDialog) {
+ openFileDialog.value = '';
+ openFileDialog.click();
+ }
+ break;
+ }
+ case 'report-issue': {
+ this.openURL(`${this.environment('repository')}/issues/new`);
+ break;
+ }
+ case 'about': {
+ this._view.about();
+ break;
+ }
+ default: {
+ break;
+ }
+ }
+ }
+
+ request(file, encoding, base) {
+ const url = base ? (`${base}/${file}`) : this._url(file);
+ return this._request(url, null, encoding);
+ }
+
+ openURL(url) {
+ this.window.location = url;
+ }
+
+ exception(error, fatal) {
+ if (this._telemetry && error) {
+ const name = error.name ? `${error.name}: ` : '';
+ const message = error.message ? error.message : JSON.stringify(error);
+ let context = '';
+ let stack = '';
+ if (error.stack) {
+ const format = (file, line, column) => {
+ return `${file.split('\\').join('/').split('/').pop()}:${line}:${column}`;
+ };
+ const match = error.stack.match(/\n {4}at (.*) \((.*):(\d*):(\d*)\)/);
+ if (match) {
+ stack = `${match[1]} (${format(match[2], match[3], match[4])})`;
+ } else {
+ const match = error.stack.match(/\n {4}at (.*):(\d*):(\d*)/);
+ if (match) {
+ stack = `(${format(match[1], match[2], match[3])})`;
+ } else {
+ const match = error.stack.match(/\n {4}at (.*)\((.*)\)/);
+ if (match) {
+ stack = `(${format(match[1], match[2], match[3])})`;
+ } else {
+ const match = error.stack.match(/\s*@\s*(.*):(.*):(.*)/);
+ if (match) {
+ stack = `(${format(match[1], match[2], match[3])})`;
+ } else {
+ const match = error.stack.match(/.*\n\s*(.*)\s*/);
+ if (match) {
+ [, stack] = match;
+ }
+ }
+ }
+ }
+ }
+ }
+ if (error.context) {
+ context = typeof error.context === 'string' ? error.context : JSON.stringify(error.context);
+ }
+ this._telemetry.send('exception', {
+ app_name: this.type,
+ app_version: this.version,
+ error_name: name,
+ error_message: message,
+ error_context: context,
+ error_stack: stack,
+ error_fatal: fatal ? true : false
+ });
+ }
+ }
+
+ event(name, params) {
+ if (name && params) {
+ params.app_name = this.type;
+ params.app_version = this.version;
+ this._telemetry.send(name, params);
+ }
+ }
+
+ _request(url, headers, encoding, callback, timeout) {
+ return new Promise((resolve, reject) => {
+ const request = new XMLHttpRequest();
+ if (!encoding) {
+ request.responseType = 'arraybuffer';
+ }
+ if (timeout) {
+ request.timeout = timeout;
+ }
+ const error = (status) => {
+ const err = new Error(`The web request failed with status code ${status} at '${url}'.`);
+ err.type = 'error';
+ err.url = url;
+ return err;
+ };
+ const progress = (value) => {
+ if (callback) {
+ callback(value);
+ }
+ };
+ request.onload = () => {
+ progress(0);
+ if (request.status === 200) {
+ if (request.responseType === 'arraybuffer') {
+ const buffer = new Uint8Array(request.response);
+ const stream = new base.BinaryStream(buffer);
+ resolve(stream);
+ } else {
+ resolve(request.responseText);
+ }
+ } else {
+ reject(error(request.status));
+ }
+ };
+ request.onerror = (e) => {
+ progress(0);
+ const err = error(request.status);
+ err.type = e.type;
+ reject(err);
+ };
+ request.ontimeout = () => {
+ progress(0);
+ request.abort();
+ const err = new Error(`The web request timed out in '${url}'.`);
+ err.type = 'timeout';
+ err.url = url;
+ reject(err);
+ };
+ request.onprogress = (e) => {
+ if (e && e.lengthComputable) {
+ progress(e.loaded / e.total * 100);
+ }
+ };
+ request.open('GET', url, true);
+ if (headers) {
+ for (const [name, value] of Object.entries(headers)) {
+ request.setRequestHeader(name, value);
+ }
+ }
+ request.send();
+ });
+ }
+
+ _url(file) {
+ file = file.startsWith('./') ? file.substring(2) : file.startsWith('/') ? file.substring(1) : file;
+ const location = this.window.location;
+ const pathname = location.pathname.endsWith('/') ?
+ location.pathname :
+ `${location.pathname.split('/').slice(0, -1).join('/')}/`;
+ return `${location.protocol}//${location.host}${pathname}${file}`;
+ }
+
+ async _openModel(url, identifier) {
+ url = url.startsWith('data:') ? url : `${url + ((/\?/).test(url) ? '&' : '?')}cb=${(new Date()).getTime()}`;
+ this._view.show('welcome spinner');
+ let context = null;
+ try {
+ const progress = (value) => {
+ this._view.progress(value);
+ };
+ let stream = await this._request(url, null, null, progress);
+ if (url.startsWith('https://raw.githubusercontent.com/') && stream.length < 150) {
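+ // Small responses from raw.githubusercontent.com may be Git LFS pointer
+ // files; redirect those to media.githubusercontent.com for the real data.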
+ const buffer = stream.peek();
+ const content = Array.from(buffer).map((c) => String.fromCodePoint(c)).join('');
+ if (content.split('\n')[0] === 'version https://git-lfs.github.com/spec/v1') {
+ url = url.replace('https://raw.githubusercontent.com/', 'https://media.githubusercontent.com/media/');
+ stream = await this._request(url, null, null, progress);
+ }
+ }
+ context = new host.BrowserHost.Context(this, url, identifier, stream);
+ this._telemetry.set('session_engaged', 1);
+ } catch (error) {
+ await this.error('Model load request failed.', error.message);
+ this._view.show('welcome');
+ return null;
+ }
+ try {
+ await this._view.open(context);
+ return identifier || context.identifier;
+ } catch (err) {
+ if (err) {
+ this._view.error(err, null, 'welcome');
+ }
+ return null;
+ }
+ }
+
+ async _open(file, files) {
+ this._view.show('welcome spinner');
+ const context = new host.BrowserHost.BrowserFileContext(this, file, files);
+ try {
+ await context.open();
+ this._telemetry.set('session_engaged', 1);
+ await this._view.open(context);
+ this._view.show(null);
+ this.document.title = files[0].name;
+ } catch (error) {
+ this._view.error(error, null, null);
+ }
+ }
+
+ async _openGist(gist) {
+ this._view.show('welcome spinner');
+ const url = `https://api.github.com/gists/${gist}`;
+ try {
+ const text = await this._request(url, { 'Content-Type': 'application/json' }, 'utf-8');
+ const json = JSON.parse(text);
+ if (json.message) {
+ this.error('Error while loading Gist.', json.message);
+ return;
+ }
+ const file = Object.values(json.files).find((file) => this._view.accept(file.filename));
+ if (!file) {
+ this.error('Error while loading Gist.', 'Gist does not contain a model file.');
+ return;
+ }
+ const identifier = file.filename;
+ const encoder = new TextEncoder();
+ const buffer = encoder.encode(file.content);
+ const stream = new base.BinaryStream(buffer);
+ const context = new host.BrowserHost.Context(this, '', identifier, stream);
+ this._telemetry.set('session_engaged', 1);
+ try {
+ await this._view.open(context);
+ this.document.title = identifier;
+ } catch (error) {
+ if (error) {
+ this._view.error(error, error.name, 'welcome');
+ }
+ }
+ } catch (error) {
+ this._view.error(error, 'Model load request failed.', 'welcome');
+ }
+ }
+
+ _setCookie(name, value, days) {
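+ // Expire any existing cookie with this name before writing the new value.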
+ this.document.cookie = `${name}=; Max-Age=0`;
+ const location = this.window.location;
+ const domain = location && location.hostname && location.hostname.indexOf('.') !== -1 ? `;domain=.${location.hostname.split('.').slice(-2).join('.')}` : '';
+ const date = new Date();
+ date.setTime(date.getTime() + (days * 24 * 60 * 60 * 1000));
+ this.document.cookie = `${name}=${value}${domain};path=/;expires=${date.toUTCString()}`;
+ }
+
+ _getCookie(name) {
+ for (const cookie of this.document.cookie.split(';')) {
+ const entry = cookie.split('=');
+ if (entry[0].trim() === name) {
+ return entry[1].trim();
+ }
+ }
+ return '';
+ }
+
+ get(name) {
+ try {
+ if (typeof this.window.localStorage !== 'undefined') {
+ const content = this.window.localStorage.getItem(name);
+ return JSON.parse(content);
+ }
+ } catch (error) {
+ // continue regardless of error
+ }
+ return undefined;
+ }
+
+ set(name, value) {
+ try {
+ if (typeof this.window.localStorage !== 'undefined') {
+ this.window.localStorage.setItem(name, JSON.stringify(value));
+ }
+ } catch (error) {
+ // continue regardless of error
+ }
+ }
+
+ delete(name) {
+ try {
+ if (typeof this.window.localStorage !== 'undefined') {
+ this.window.localStorage.removeItem(name);
+ }
+ } catch (error) {
+ // continue regardless of error
+ }
+ }
+
+ _element(id) {
+ return this.document.getElementById(id);
+ }
+
+ _message(message, action) {
+ return new Promise((resolve) => {
+ this._element('message-text').innerText = message;
+ const button = this._element('message-button');
+ if (action) {
+ button.style.removeProperty('display');
+ button.innerText = action;
+ button.onclick = () => {
+ button.onclick = null;
+ this._document.body.classList.remove('message');
+ resolve(0);
+ };
+ button.focus();
+ } else {
+ button.style.display = 'none';
+ button.onclick = null;
+ }
+ this._document.body.classList.add('message');
+ });
+ }
+};
+
+host.BrowserHost.BrowserFileContext = class {
+
+ constructor(host, file, blobs) {
+ this._host = host;
+ this._file = file;
+ this._blobs = {};
+ for (const blob of blobs) {
+ this._blobs[blob.name] = blob;
+ }
+ }
+
+ get identifier() {
+ return this._file.name;
+ }
+
+ get stream() {
+ return this._stream;
+ }
+
+ async request(file, encoding, basename) {
+ if (basename !== undefined) {
+ return this._host.request(file, encoding, basename);
+ }
+ const blob = this._blobs[file];
+ if (!blob) {
+ throw new Error(`File not found '${file}'.`);
+ }
+ return new Promise((resolve, reject) => {
+ const reader = new FileReader();
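+ // Read the blob in 0x10000000 (256 MiB) slices to avoid allocating one
+ // contiguous buffer for very large files.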
+ const size = 0x10000000;
+ let position = 0;
+ const chunks = [];
+ reader.onload = (e) => {
+ if (encoding) {
+ resolve(e.target.result);
+ } else {
+ const buffer = new Uint8Array(e.target.result);
+ if (position === 0 && buffer.length === blob.size) {
+ const stream = new base.BinaryStream(buffer);
+ resolve(stream);
+ } else {
+ chunks.push(buffer);
+ position += buffer.length;
+ if (position < blob.size) {
+ const slice = blob.slice(position, Math.min(position + size, blob.size));
+ reader.readAsArrayBuffer(slice);
+ } else {
+ const stream = new host.BrowserHost.FileStream(chunks, size, 0, position);
+ resolve(stream);
+ }
+ }
+ }
+ };
+ reader.onerror = (event) => {
+ event = event || this._host.window.event;
+ let message = '';
+ const error = event.target.error;
+ switch (error.code) {
+ case error.NOT_FOUND_ERR:
+ message = `File not found '${file}'.`;
+ break;
+ case error.NOT_READABLE_ERR:
+ message = `File not readable '${file}'.`;
+ break;
+ case error.SECURITY_ERR:
+ message = `File access denied '${file}'.`;
+ break;
+ default:
+ message = error.message ? error.message : `File read '${error.code}' error '${file}'.`;
+ break;
+ }
+ reject(new Error(message));
+ };
+ if (encoding === 'utf-8') {
+ reader.readAsText(blob, encoding);
+ } else {
+ const slice = blob.slice(position, Math.min(position + size, blob.size));
+ reader.readAsArrayBuffer(slice);
+ }
+ });
+ }
+
+ async require(id) {
+ return await this._host.require(id);
+ }
+
+ exception(error, fatal) {
+ this._host.exception(error, fatal);
+ }
+
+ async open() {
+ this._stream = await this.request(this._file.name, null);
+ }
+};
+
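+// FileStream presents the chunk list as one contiguous, seekable byte
+// stream. _fill() caches a window of at most 256 MiB in this._buffer so
+// short peek()/read() calls return subarrays of that cache, while reads of
+// 0x10000000 bytes or more are assembled chunk by chunk in _read(). A usage
+// sketch (hypothetical values; assumes `chunks` holds equal-sized 64 KiB
+// Uint8Array slices of one file and `total` is the file length):
+//
+//   const stream = new host.BrowserHost.FileStream(chunks, 0x10000, 0, total);
+//   const magic = stream.peek(4);   // look ahead without consuming
+//   const header = stream.read(16); // consume the first 16 bytes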
+host.BrowserHost.FileStream = class {
+
+ constructor(chunks, size, start, length) {
+ this._chunks = chunks;
+ this._size = size;
+ this._start = start;
+ this._length = length;
+ this._position = 0;
+ }
+
+ get position() {
+ return this._position;
+ }
+
+ get length() {
+ return this._length;
+ }
+
+ stream(length) {
+ const file = new host.BrowserHost.FileStream(this._chunks, this._size, this._start + this._position, length);
+ this.skip(length);
+ return file;
+ }
+
+ seek(position) {
+ this._position = position >= 0 ? position : this._length + position;
+ }
+
+ skip(offset) {
+ this._position += offset;
+ if (this._position > this._length) {
+ throw new Error(`Expected ${this._position - this._length} more bytes. The file might be corrupted. Unexpected end of file.`);
+ }
+ }
+
+ peek(length) {
+ length = length !== undefined ? length : this._length - this._position;
+ if (length < 0x10000000) {
+ const position = this._fill(length);
+ this._position -= length;
+ return this._buffer.subarray(position, position + length);
+ }
+        // remember the relative position; _read() expects an absolute offset,
+        // so sub-streams created via stream() (start > 0) stay consistent
+        const position = this._position;
+        this.skip(length);
+        this.seek(position);
+        const buffer = new Uint8Array(length);
+        this._read(buffer, this._start + position);
+ return buffer;
+ }
+
+ read(length) {
+ length = length !== undefined ? length : this._length - this._position;
+ if (length < 0x10000000) {
+ const position = this._fill(length);
+ return this._buffer.subarray(position, position + length);
+ }
+ const position = this._start + this._position;
+ this.skip(length);
+ const buffer = new Uint8Array(length);
+ this._read(buffer, position);
+ return buffer;
+ }
+
+ byte() {
+ const position = this._fill(1);
+ return this._buffer[position];
+ }
+
+ _fill(length) {
+ if (this._position + length > this._length) {
+ throw new Error(`Expected ${this._position + length - this._length} more bytes. The file might be corrupted. Unexpected end of file.`);
+ }
+ if (!this._buffer || this._position < this._offset || this._position + length > this._offset + this._buffer.length) {
+ this._offset = this._start + this._position;
+ this._buffer = new Uint8Array(Math.min(0x10000000, this._start + this._length - this._offset));
+ this._read(this._buffer, this._offset);
+ }
+ const position = this._start + this._position - this._offset;
+ this._position += length;
+ return position;
+ }
+
+ _read(buffer, offset) {
+ let index = Math.floor(offset / this._size);
+ offset = offset - (index * this._size);
+ const chunk = this._chunks[index++];
+ let destination = Math.min(chunk.length - offset, buffer.length);
+ buffer.set(chunk.subarray(offset, offset + destination), 0);
+ while (destination < buffer.length) {
+ const chunk = this._chunks[index++];
+ const size = Math.min(this._size, buffer.length - destination);
+ buffer.set(chunk.subarray(0, size), destination);
+ destination += size;
+ }
+ }
+};
+
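+// Context pairs an opened stream with the URL it came from so that model
+// loaders can request sibling files (weights, external data) relative to
+// the same base path.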
+host.BrowserHost.Context = class {
+
+ constructor(host, url, identifier, stream) {
+ this._host = host;
+ this._stream = stream;
+ if (identifier) {
+ this._identifier = identifier;
+ this._base = url;
+ if (this._base.endsWith('/')) {
+                this._base = this._base.substring(0, this._base.length - 1);
+ }
+ } else {
+ const parts = url.split('?')[0].split('/');
+ this._identifier = parts.pop();
+ this._base = parts.join('/');
+ }
+ }
+
+ get identifier() {
+ return this._identifier;
+ }
+
+ get stream() {
+ return this._stream;
+ }
+
+ request(file, encoding, base) {
+ return this._host.request(file, encoding, base === undefined ? this._base : base);
+ }
+
+ require(id) {
+ return this._host.require(id);
+ }
+
+ exception(error, fatal) {
+ this._host.exception(error, fatal);
+ }
+};
+
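+// Fallback for browsers without 'scroll-behavior' support: emulate
+// Element.scrollTo({ behavior: 'smooth' }) by interpolating scrollLeft and
+// scrollTop with a cosine ease over 468 ms via requestAnimationFrame.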
+if (!('scrollBehavior' in window.document.documentElement.style)) {
+ const __scrollTo__ = Element.prototype.scrollTo;
+ Element.prototype.scrollTo = function(options) {
+ if (options !== undefined) {
+            if (options === null || typeof options !== 'object' || options.behavior === undefined || options.behavior === 'auto' || options.behavior === 'instant') {
+ if (__scrollTo__) {
+ __scrollTo__.apply(this, arguments);
+ }
+ } else {
+ const now = () => window.performance && window.performance.now ? window.performance.now() : Date.now();
+ const ease = (k) => 0.5 * (1 - Math.cos(Math.PI * k));
+ const step = (context) => {
+ const value = ease(Math.min((now() - context.startTime) / 468, 1));
+ const x = context.startX + (context.x - context.startX) * value;
+ const y = context.startY + (context.y - context.startY) * value;
+ context.element.scrollLeft = x;
+ context.element.scrollTop = y;
+ if (x !== context.x || y !== context.y) {
+ window.requestAnimationFrame(step.bind(window, context));
+ }
+ };
+ const context = {
+ element: this,
+ x: typeof options.left === 'undefined' ? this.scrollLeft : ~~options.left,
+ y: typeof options.top === 'undefined' ? this.scrollTop : ~~options.top,
+ startX: this.scrollLeft,
+ startY: this.scrollTop,
+ startTime: now()
+ };
+ step(context);
+ }
+ }
+ };
+}
+
+if (typeof window !== 'undefined' && window.exports) {
+ window.exports.browser = host;
+}
+
+export const BrowserHost = host.BrowserHost;
diff --git a/caffe-metadata.json b/caffe-metadata.json
new file mode 100644
index 00000000000..3d7c1927eec
--- /dev/null
+++ b/caffe-metadata.json
@@ -0,0 +1,462 @@
+[
+ {
+ "name": "Accuracy",
+ "inputs": [
+ { "name": "predictions" },
+ { "name": "labels" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "AnnotatedData",
+ "category": "Data",
+ "outputs": [
+ { "name": "data" }
+ ]
+ },
+ {
+ "name": "BatchNorm",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "use_global_stats", "type": "boolean", "visible": false },
+ { "name": "eps", "type": "float32", "default": 0.00001 }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "gamma" },
+ { "name": "beta" },
+ { "name": "mean" },
+ { "name": "variance" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "BN",
+ "category": "Normalization",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "ColorConv",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Concat",
+ "category": "Tensor",
+ "inputs": [
+ { "name": "inputs", "option": "variadic" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "ContrastiveLossParameter",
+ "attributes": [
+ { "name": "margin", "default": 1 },
+ { "name": "legacy_version", "default": false }
+ ]
+ },
+ {
+ "name": "Convolution",
+ "category": "Layer",
+ "attributes": [
+ { "name": "bias_term", "visible": false },
+ { "name": "weight_filler", "visible": false },
+ { "name": "bias_filler", "visible": false },
+ { "name": "num_output", "visible": false },
+ { "name": "pad", "default": [ 0 ] },
+ { "name": "kernel_size", "default": [] },
+ { "name": "stride", "default": [ 1 ] },
+ { "name": "dilation", "default": [] },
+ { "name": "group", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "filter" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "ConvolutionDepthwise",
+ "category": "Layer",
+ "attributes": [
+ { "name": "pad", "default": [ 0 ] },
+ { "name": "kernel_size", "default": [] },
+ { "name": "stride", "default": [ 1 ] },
+ { "name": "bias_term", "visible": false },
+ { "name": "weight_filler", "visible": false },
+ { "name": "bias_filler", "visible": false },
+ { "name": "num_output", "visible": false }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "filter" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Crop",
+ "category": "Data",
+ "inputs": [
+ { "name": "data" },
+ { "name": "size" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Data",
+ "category": "Data",
+ "outputs": [
+ { "name": "data" },
+ { "name": "label" }
+ ]
+ },
+ {
+ "name": "Deconvolution",
+ "category": "Layer",
+ "attributes": [
+ { "name": "bias_term", "visible": false },
+ { "name": "weight_filler", "visible": false },
+ { "name": "bias_filler", "visible": false },
+ { "name": "num_output", "visible": false },
+ { "name": "pad", "default": [] },
+ { "name": "kernel_size", "default": [] },
+ { "name": "stride", "default": [] },
+ { "name": "dilation", "default": [] }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "filter" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "DepthwiseConvolution",
+ "category": "Layer",
+ "attributes": [
+ { "name": "bias_term", "visible": false },
+ { "name": "weight_filler", "visible": false },
+ { "name": "bias_filler", "visible": false },
+ { "name": "num_output", "visible": false }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "filter" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Dropout",
+ "category": "Dropout",
+ "attributes": [
+ { "name": "dropout_ratio", "default": 0.5 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "DummyData",
+ "category": "Data",
+ "outputs": [
+ { "name": "data" }
+ ]
+ },
+ {
+ "name": "Eltwise",
+ "attributes": [
+ { "name": "operation", "type": "EltwiseParameter.EltwiseOp", "default": 1 },
+ { "name": "coeff", "type": "float32[]", "default": [] },
+ { "name": "stable_prod_grad", "type": "boolean", "default": true }
+ ],
+ "inputs": [
+ { "name": "inputs", "option": "variadic" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "EuclideanLoss",
+ "inputs": [
+ { "name": "predictions" },
+ { "name": "targets" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Flatten",
+ "category": "Shape",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "HDF5Data",
+ "category": "Data",
+ "outputs": [
+ { "name": "data" }
+ ]
+ },
+ {
+ "name": "ImageData",
+ "category": "Data",
+ "outputs": [
+ { "name": "data" },
+ { "name": "label" }
+ ]
+ },
+ {
+ "name": "InnerProduct",
+ "category": "Layer",
+ "attributes": [
+ { "name": "bias_term", "visible": false },
+ { "name": "weight_filler", "visible": false },
+ { "name": "bias_filler", "visible": false },
+ { "name": "num_output", "visible": false }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weights" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "LRN",
+ "category": "Normalization",
+ "attributes": [
+ { "name": "local_size", "type": "uint32", "default": 5 },
+ { "name": "alpha", "type": "float32", "default": 0.0001 },
+ { "name": "beta", "type": "float32", "default": 0.75 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "LSTM",
+ "category": "Layer",
+ "attributes": [
+ { "name": "weight_filler", "visible": false },
+ { "name": "bias_filler", "visible": false },
+ { "name": "num_output", "visible": false }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "weights" },
+ { "name": "h_0" },
+ { "name": "c_0" }
+ ],
+ "outputs": [
+ { "name": "output" },
+ { "name": "h_T" },
+ { "name": "c_T" }
+ ]
+ },
+ {
+ "name": "Parameter",
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Permute",
+ "category": "Shape",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Pooling",
+ "category": "Pool",
+ "attributes": [
+ { "name": "pool", "type": "PoolingParameter.PoolMethod", "default": 0 },
+ { "name": "engine", "type": "PoolingParameter.Engine", "default": 0 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "PReLU",
+ "category": "Activation",
+ "inputs": [
+ { "name": "input" },
+ { "name": "slope" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Python"
+ },
+ {
+ "name": "ReLU",
+ "category": "Activation",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "ReLU6",
+ "category": "Activation",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Reshape",
+ "category": "Shape",
+ "inputs": [
+ { "name": "data" }
+ ],
+ "outputs": [
+ { "name": "reshaped" }
+ ]
+ },
+ {
+ "name": "Scale",
+ "category": "Layer",
+ "attributes": [
+ { "name": "filler", "visible": false },
+ { "name": "bias_term", "visible": false },
+ { "name": "bias_filler", "visible": false }
+ ],
+ "inputs": [
+ { "name": "input" },
+ { "name": "scale" },
+ { "name": "bias" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Sigmoid",
+ "category": "Activation",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Slice",
+ "category": "Tensor",
+ "attributes": [
+ { "name": "axis", "default": 1 }
+ ],
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "outputs", "option": "variadic" }
+ ]
+ },
+ {
+ "name": "Softmax",
+ "category": "Activation",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "SoftmaxLoss",
+ "category": "Activation",
+ "inputs": [
+ { "name": "input" },
+ { "name": "labels" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "SoftmaxWithLoss",
+ "category": "Activation",
+ "inputs": [
+ { "name": "input" },
+ { "name": "labels" }
+ ],
+ "outputs": [
+ { "name": "output" }
+ ]
+ },
+ {
+ "name": "Split",
+ "category": "Tensor",
+ "inputs": [
+ { "name": "input" }
+ ],
+ "outputs": [
+ { "name": "outputs", "option": "variadic" }
+ ]
+ },
+ {
+ "name": "WindowData",
+ "category": "Data",
+ "outputs": [
+ { "name": "data" },
+ { "name": "label" }
+ ]
+ }
+]
\ No newline at end of file
diff --git a/caffe-proto.js b/caffe-proto.js
new file mode 100644
index 00000000000..095f504f714
--- /dev/null
+++ b/caffe-proto.js
@@ -0,0 +1,5357 @@
+
+import * as protobuf from './protobuf.js';
+
+const $root = protobuf.get('caffe');
+
+$root.caffe = {};
+
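+// The generated decode() methods below walk the protobuf wire format: each
+// field is preceded by a varint tag where (tag >>> 3) is the field number
+// and (tag & 7) is the wire type, so unknown fields are skipped with
+// skipType(tag & 7). decodeText() parses the same messages from prototxt,
+// dispatching on field names instead of numbers.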
+$root.caffe.BlobShape = class BlobShape {
+
+ constructor() {
+ this.dim = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.BlobShape();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.dim = reader.array(message.dim, () => reader.int64(), tag);
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.BlobShape();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "dim":
+ reader.array(message.dim, () => reader.int64());
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.BlobProto = class BlobProto {
+
+ constructor() {
+ this.data = [];
+ this.diff = [];
+ this.double_data = [];
+ this.double_diff = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.BlobProto();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 7:
+ message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32());
+ break;
+ case 5:
+ message.data = reader.floats(message.data, tag);
+ break;
+ case 6:
+ message.diff = reader.floats(message.diff, tag);
+ break;
+ case 8:
+ message.double_data = reader.doubles(message.double_data, tag);
+ break;
+ case 9:
+ message.double_diff = reader.doubles(message.double_diff, tag);
+ break;
+ case 1:
+ message.num = reader.int32();
+ break;
+ case 2:
+ message.channels = reader.int32();
+ break;
+ case 3:
+ message.height = reader.int32();
+ break;
+ case 4:
+ message.width = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.BlobProto();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "shape":
+ message.shape = $root.caffe.BlobShape.decodeText(reader);
+ break;
+ case "data":
+ reader.array(message.data, () => reader.float());
+ break;
+ case "diff":
+ reader.array(message.diff, () => reader.float());
+ break;
+ case "double_data":
+ reader.array(message.double_data, () => reader.double());
+ break;
+ case "double_diff":
+ reader.array(message.double_diff, () => reader.double());
+ break;
+ case "num":
+ message.num = reader.int32();
+ break;
+ case "channels":
+ message.channels = reader.int32();
+ break;
+ case "height":
+ message.height = reader.int32();
+ break;
+ case "width":
+ message.width = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
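+// Prototype-level defaults mirror the [default = ...] values declared in
+// caffe.proto, so fields absent from a serialized message read back as
+// their protobuf defaults without being stored per instance.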
+$root.caffe.BlobProto.prototype.shape = null;
+$root.caffe.BlobProto.prototype.num = 0;
+$root.caffe.BlobProto.prototype.channels = 0;
+$root.caffe.BlobProto.prototype.height = 0;
+$root.caffe.BlobProto.prototype.width = 0;
+
+$root.caffe.BlobProtoVector = class BlobProtoVector {
+
+ constructor() {
+ this.blobs = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.BlobProtoVector();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32()));
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.BlobProtoVector();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "blobs":
+ message.blobs.push($root.caffe.BlobProto.decodeText(reader));
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.Datum = class Datum {
+
+ constructor() {
+ this.float_data = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.Datum();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.channels = reader.int32();
+ break;
+ case 2:
+ message.height = reader.int32();
+ break;
+ case 3:
+ message.width = reader.int32();
+ break;
+ case 4:
+ message.data = reader.bytes();
+ break;
+ case 5:
+ message.label = reader.int32();
+ break;
+ case 6:
+ message.float_data = reader.floats(message.float_data, tag);
+ break;
+ case 7:
+ message.encoded = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.Datum();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "channels":
+ message.channels = reader.int32();
+ break;
+ case "height":
+ message.height = reader.int32();
+ break;
+ case "width":
+ message.width = reader.int32();
+ break;
+ case "data":
+ message.data = reader.bytes();
+ break;
+ case "label":
+ message.label = reader.int32();
+ break;
+ case "float_data":
+ reader.array(message.float_data, () => reader.float());
+ break;
+ case "encoded":
+ message.encoded = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.Datum.prototype.channels = 0;
+$root.caffe.Datum.prototype.height = 0;
+$root.caffe.Datum.prototype.width = 0;
+$root.caffe.Datum.prototype.data = new Uint8Array([]);
+$root.caffe.Datum.prototype.label = 0;
+$root.caffe.Datum.prototype.encoded = false;
+
+$root.caffe.FillerParameter = class FillerParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.FillerParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.type = reader.string();
+ break;
+ case 2:
+ message.value = reader.float();
+ break;
+ case 3:
+ message.min = reader.float();
+ break;
+ case 4:
+ message.max = reader.float();
+ break;
+ case 5:
+ message.mean = reader.float();
+ break;
+ case 6:
+ message.std = reader.float();
+ break;
+ case 7:
+ message.sparse = reader.int32();
+ break;
+ case 8:
+ message.variance_norm = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.FillerParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "type":
+ message.type = reader.string();
+ break;
+ case "value":
+ message.value = reader.float();
+ break;
+ case "min":
+ message.min = reader.float();
+ break;
+ case "max":
+ message.max = reader.float();
+ break;
+ case "mean":
+ message.mean = reader.float();
+ break;
+ case "std":
+ message.std = reader.float();
+ break;
+ case "sparse":
+ message.sparse = reader.int32();
+ break;
+ case "variance_norm":
+ message.variance_norm = reader.enum($root.caffe.FillerParameter.VarianceNorm);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.FillerParameter.prototype.type = "constant";
+$root.caffe.FillerParameter.prototype.value = 0;
+$root.caffe.FillerParameter.prototype.min = 0;
+$root.caffe.FillerParameter.prototype.max = 1;
+$root.caffe.FillerParameter.prototype.mean = 0;
+$root.caffe.FillerParameter.prototype.std = 1;
+$root.caffe.FillerParameter.prototype.sparse = -1;
+$root.caffe.FillerParameter.prototype.variance_norm = 0;
+
+$root.caffe.FillerParameter.VarianceNorm = {
+ "FAN_IN": 0,
+ "FAN_OUT": 1,
+ "AVERAGE": 2
+};
+
+$root.caffe.NetParameter = class NetParameter {
+
+ constructor() {
+ this.input = [];
+ this.input_shape = [];
+ this.input_dim = [];
+ this.layer = [];
+ this.layers = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.NetParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.name = reader.string();
+ break;
+ case 3:
+ message.input.push(reader.string());
+ break;
+ case 8:
+ message.input_shape.push($root.caffe.BlobShape.decode(reader, reader.uint32()));
+ break;
+ case 4:
+ message.input_dim = reader.array(message.input_dim, () => reader.int32(), tag);
+ break;
+ case 5:
+ message.force_backward = reader.bool();
+ break;
+ case 6:
+ message.state = $root.caffe.NetState.decode(reader, reader.uint32());
+ break;
+ case 7:
+ message.debug_info = reader.bool();
+ break;
+ case 100:
+ message.layer.push($root.caffe.LayerParameter.decode(reader, reader.uint32()));
+ break;
+ case 2:
+ message.layers.push($root.caffe.V1LayerParameter.decode(reader, reader.uint32()));
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.NetParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "name":
+ message.name = reader.string();
+ break;
+ case "input":
+ reader.array(message.input, () => reader.string());
+ break;
+ case "input_shape":
+ message.input_shape.push($root.caffe.BlobShape.decodeText(reader));
+ break;
+ case "input_dim":
+ reader.array(message.input_dim, () => reader.int32());
+ break;
+ case "force_backward":
+ message.force_backward = reader.bool();
+ break;
+ case "state":
+ message.state = $root.caffe.NetState.decodeText(reader);
+ break;
+ case "debug_info":
+ message.debug_info = reader.bool();
+ break;
+ case "layer":
+ message.layer.push($root.caffe.LayerParameter.decodeText(reader));
+ break;
+ case "layers":
+ message.layers.push($root.caffe.V1LayerParameter.decodeText(reader));
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.NetParameter.prototype.name = "";
+$root.caffe.NetParameter.prototype.force_backward = false;
+$root.caffe.NetParameter.prototype.state = null;
+$root.caffe.NetParameter.prototype.debug_info = false;
+
+$root.caffe.SolverParameter = class SolverParameter {
+
+ constructor() {
+ this.test_net = [];
+ this.test_net_param = [];
+ this.test_state = [];
+ this.test_iter = [];
+ this.stepvalue = [];
+ this.weights = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.SolverParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 24:
+ message.net = reader.string();
+ break;
+ case 25:
+ message.net_param = $root.caffe.NetParameter.decode(reader, reader.uint32());
+ break;
+ case 1:
+ message.train_net = reader.string();
+ break;
+ case 2:
+ message.test_net.push(reader.string());
+ break;
+ case 21:
+ message.train_net_param = $root.caffe.NetParameter.decode(reader, reader.uint32());
+ break;
+ case 22:
+ message.test_net_param.push($root.caffe.NetParameter.decode(reader, reader.uint32()));
+ break;
+ case 26:
+ message.train_state = $root.caffe.NetState.decode(reader, reader.uint32());
+ break;
+ case 27:
+ message.test_state.push($root.caffe.NetState.decode(reader, reader.uint32()));
+ break;
+ case 3:
+ message.test_iter = reader.array(message.test_iter, () => reader.int32(), tag);
+ break;
+ case 4:
+ message.test_interval = reader.int32();
+ break;
+ case 19:
+ message.test_compute_loss = reader.bool();
+ break;
+ case 32:
+ message.test_initialization = reader.bool();
+ break;
+ case 5:
+ message.base_lr = reader.float();
+ break;
+ case 6:
+ message.display = reader.int32();
+ break;
+ case 33:
+ message.average_loss = reader.int32();
+ break;
+ case 7:
+ message.max_iter = reader.int32();
+ break;
+ case 36:
+ message.iter_size = reader.int32();
+ break;
+ case 8:
+ message.lr_policy = reader.string();
+ break;
+ case 9:
+ message.gamma = reader.float();
+ break;
+ case 10:
+ message.power = reader.float();
+ break;
+ case 11:
+ message.momentum = reader.float();
+ break;
+ case 12:
+ message.weight_decay = reader.float();
+ break;
+ case 29:
+ message.regularization_type = reader.string();
+ break;
+ case 13:
+ message.stepsize = reader.int32();
+ break;
+ case 34:
+ message.stepvalue = reader.array(message.stepvalue, () => reader.int32(), tag);
+ break;
+ case 35:
+ message.clip_gradients = reader.float();
+ break;
+ case 14:
+ message.snapshot = reader.int32();
+ break;
+ case 15:
+ message.snapshot_prefix = reader.string();
+ break;
+ case 16:
+ message.snapshot_diff = reader.bool();
+ break;
+ case 37:
+ message.snapshot_format = reader.int32();
+ break;
+ case 17:
+ message.solver_mode = reader.int32();
+ break;
+ case 18:
+ message.device_id = reader.int32();
+ break;
+ case 20:
+ message.random_seed = reader.int64();
+ break;
+ case 40:
+ message.type = reader.string();
+ break;
+ case 31:
+ message.delta = reader.float();
+ break;
+ case 39:
+ message.momentum2 = reader.float();
+ break;
+ case 38:
+ message.rms_decay = reader.float();
+ break;
+ case 23:
+ message.debug_info = reader.bool();
+ break;
+ case 28:
+ message.snapshot_after_train = reader.bool();
+ break;
+ case 30:
+ message.solver_type = reader.int32();
+ break;
+ case 41:
+ message.layer_wise_reduce = reader.bool();
+ break;
+ case 42:
+ message.weights.push(reader.string());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.SolverParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "net":
+ message.net = reader.string();
+ break;
+ case "net_param":
+ message.net_param = $root.caffe.NetParameter.decodeText(reader);
+ break;
+ case "train_net":
+ message.train_net = reader.string();
+ break;
+ case "test_net":
+ reader.array(message.test_net, () => reader.string());
+ break;
+ case "train_net_param":
+ message.train_net_param = $root.caffe.NetParameter.decodeText(reader);
+ break;
+ case "test_net_param":
+ message.test_net_param.push($root.caffe.NetParameter.decodeText(reader));
+ break;
+ case "train_state":
+ message.train_state = $root.caffe.NetState.decodeText(reader);
+ break;
+ case "test_state":
+ message.test_state.push($root.caffe.NetState.decodeText(reader));
+ break;
+ case "test_iter":
+ reader.array(message.test_iter, () => reader.int32());
+ break;
+ case "test_interval":
+ message.test_interval = reader.int32();
+ break;
+ case "test_compute_loss":
+ message.test_compute_loss = reader.bool();
+ break;
+ case "test_initialization":
+ message.test_initialization = reader.bool();
+ break;
+ case "base_lr":
+ message.base_lr = reader.float();
+ break;
+ case "display":
+ message.display = reader.int32();
+ break;
+ case "average_loss":
+ message.average_loss = reader.int32();
+ break;
+ case "max_iter":
+ message.max_iter = reader.int32();
+ break;
+ case "iter_size":
+ message.iter_size = reader.int32();
+ break;
+ case "lr_policy":
+ message.lr_policy = reader.string();
+ break;
+ case "gamma":
+ message.gamma = reader.float();
+ break;
+ case "power":
+ message.power = reader.float();
+ break;
+ case "momentum":
+ message.momentum = reader.float();
+ break;
+ case "weight_decay":
+ message.weight_decay = reader.float();
+ break;
+ case "regularization_type":
+ message.regularization_type = reader.string();
+ break;
+ case "stepsize":
+ message.stepsize = reader.int32();
+ break;
+ case "stepvalue":
+ reader.array(message.stepvalue, () => reader.int32());
+ break;
+ case "clip_gradients":
+ message.clip_gradients = reader.float();
+ break;
+ case "snapshot":
+ message.snapshot = reader.int32();
+ break;
+ case "snapshot_prefix":
+ message.snapshot_prefix = reader.string();
+ break;
+ case "snapshot_diff":
+ message.snapshot_diff = reader.bool();
+ break;
+ case "snapshot_format":
+ message.snapshot_format = reader.enum($root.caffe.SolverParameter.SnapshotFormat);
+ break;
+ case "solver_mode":
+ message.solver_mode = reader.enum($root.caffe.SolverParameter.SolverMode);
+ break;
+ case "device_id":
+ message.device_id = reader.int32();
+ break;
+ case "random_seed":
+ message.random_seed = reader.int64();
+ break;
+ case "type":
+ message.type = reader.string();
+ break;
+ case "delta":
+ message.delta = reader.float();
+ break;
+ case "momentum2":
+ message.momentum2 = reader.float();
+ break;
+ case "rms_decay":
+ message.rms_decay = reader.float();
+ break;
+ case "debug_info":
+ message.debug_info = reader.bool();
+ break;
+ case "snapshot_after_train":
+ message.snapshot_after_train = reader.bool();
+ break;
+ case "solver_type":
+ message.solver_type = reader.enum($root.caffe.SolverParameter.SolverType);
+ break;
+ case "layer_wise_reduce":
+ message.layer_wise_reduce = reader.bool();
+ break;
+ case "weights":
+ reader.array(message.weights, () => reader.string());
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.SolverParameter.prototype.net = "";
+$root.caffe.SolverParameter.prototype.net_param = null;
+$root.caffe.SolverParameter.prototype.train_net = "";
+$root.caffe.SolverParameter.prototype.train_net_param = null;
+$root.caffe.SolverParameter.prototype.train_state = null;
+$root.caffe.SolverParameter.prototype.test_interval = 0;
+$root.caffe.SolverParameter.prototype.test_compute_loss = false;
+$root.caffe.SolverParameter.prototype.test_initialization = true;
+$root.caffe.SolverParameter.prototype.base_lr = 0;
+$root.caffe.SolverParameter.prototype.display = 0;
+$root.caffe.SolverParameter.prototype.average_loss = 1;
+$root.caffe.SolverParameter.prototype.max_iter = 0;
+$root.caffe.SolverParameter.prototype.iter_size = 1;
+$root.caffe.SolverParameter.prototype.lr_policy = "";
+$root.caffe.SolverParameter.prototype.gamma = 0;
+$root.caffe.SolverParameter.prototype.power = 0;
+$root.caffe.SolverParameter.prototype.momentum = 0;
+$root.caffe.SolverParameter.prototype.weight_decay = 0;
+$root.caffe.SolverParameter.prototype.regularization_type = "L2";
+$root.caffe.SolverParameter.prototype.stepsize = 0;
+$root.caffe.SolverParameter.prototype.clip_gradients = -1;
+$root.caffe.SolverParameter.prototype.snapshot = 0;
+$root.caffe.SolverParameter.prototype.snapshot_prefix = "";
+$root.caffe.SolverParameter.prototype.snapshot_diff = false;
+$root.caffe.SolverParameter.prototype.snapshot_format = 1;
+$root.caffe.SolverParameter.prototype.solver_mode = 1;
+$root.caffe.SolverParameter.prototype.device_id = 0;
+$root.caffe.SolverParameter.prototype.random_seed = protobuf.Int64.create(-1);
+$root.caffe.SolverParameter.prototype.type = "SGD";
+$root.caffe.SolverParameter.prototype.delta = 1e-8;
+$root.caffe.SolverParameter.prototype.momentum2 = 0.999;
+$root.caffe.SolverParameter.prototype.rms_decay = 0.99;
+$root.caffe.SolverParameter.prototype.debug_info = false;
+$root.caffe.SolverParameter.prototype.snapshot_after_train = true;
+$root.caffe.SolverParameter.prototype.solver_type = 0;
+$root.caffe.SolverParameter.prototype.layer_wise_reduce = true;
+
+$root.caffe.SolverParameter.SnapshotFormat = {
+ "HDF5": 0,
+ "BINARYPROTO": 1
+};
+
+$root.caffe.SolverParameter.SolverMode = {
+ "CPU": 0,
+ "GPU": 1
+};
+
+$root.caffe.SolverParameter.SolverType = {
+ "SGD": 0,
+ "NESTEROV": 1,
+ "ADAGRAD": 2,
+ "RMSPROP": 3,
+ "ADADELTA": 4,
+ "ADAM": 5
+};
+
+$root.caffe.SolverState = class SolverState {
+
+ constructor() {
+ this.history = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.SolverState();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.iter = reader.int32();
+ break;
+ case 2:
+ message.learned_net = reader.string();
+ break;
+ case 3:
+ message.history.push($root.caffe.BlobProto.decode(reader, reader.uint32()));
+ break;
+ case 4:
+ message.current_step = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.SolverState();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "iter":
+ message.iter = reader.int32();
+ break;
+ case "learned_net":
+ message.learned_net = reader.string();
+ break;
+ case "history":
+ message.history.push($root.caffe.BlobProto.decodeText(reader));
+ break;
+ case "current_step":
+ message.current_step = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.SolverState.prototype.iter = 0;
+$root.caffe.SolverState.prototype.learned_net = "";
+$root.caffe.SolverState.prototype.current_step = 0;
+
+$root.caffe.Phase = {
+ "TRAIN": 0,
+ "TEST": 1
+};
+
+$root.caffe.NetState = class NetState {
+
+ constructor() {
+ this.stage = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.NetState();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.phase = reader.int32();
+ break;
+ case 2:
+ message.level = reader.int32();
+ break;
+ case 3:
+ message.stage.push(reader.string());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.NetState();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "phase":
+ message.phase = reader.enum($root.caffe.Phase);
+ break;
+ case "level":
+ message.level = reader.int32();
+ break;
+ case "stage":
+ reader.array(message.stage, () => reader.string());
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.NetState.prototype.phase = 1;
+$root.caffe.NetState.prototype.level = 0;
+
+$root.caffe.NetStateRule = class NetStateRule {
+
+ constructor() {
+ this.stage = [];
+ this.not_stage = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.NetStateRule();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.phase = reader.int32();
+ break;
+ case 2:
+ message.min_level = reader.int32();
+ break;
+ case 3:
+ message.max_level = reader.int32();
+ break;
+ case 4:
+ message.stage.push(reader.string());
+ break;
+ case 5:
+ message.not_stage.push(reader.string());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.NetStateRule();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "phase":
+ message.phase = reader.enum($root.caffe.Phase);
+ break;
+ case "min_level":
+ message.min_level = reader.int32();
+ break;
+ case "max_level":
+ message.max_level = reader.int32();
+ break;
+ case "stage":
+ reader.array(message.stage, () => reader.string());
+ break;
+ case "not_stage":
+ reader.array(message.not_stage, () => reader.string());
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.NetStateRule.prototype.phase = 0;
+$root.caffe.NetStateRule.prototype.min_level = 0;
+$root.caffe.NetStateRule.prototype.max_level = 0;
+
+$root.caffe.ParamSpec = class ParamSpec {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ParamSpec();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.name = reader.string();
+ break;
+ case 2:
+ message.share_mode = reader.int32();
+ break;
+ case 3:
+ message.lr_mult = reader.float();
+ break;
+ case 4:
+ message.decay_mult = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ParamSpec();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "name":
+ message.name = reader.string();
+ break;
+ case "share_mode":
+ message.share_mode = reader.enum($root.caffe.ParamSpec.DimCheckMode);
+ break;
+ case "lr_mult":
+ message.lr_mult = reader.float();
+ break;
+ case "decay_mult":
+ message.decay_mult = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ParamSpec.prototype.name = "";
+$root.caffe.ParamSpec.prototype.share_mode = 0;
+$root.caffe.ParamSpec.prototype.lr_mult = 1;
+$root.caffe.ParamSpec.prototype.decay_mult = 1;
+
+$root.caffe.ParamSpec.DimCheckMode = {
+ "STRICT": 0,
+ "PERMISSIVE": 1
+};
+
+$root.caffe.LayerParameter = class LayerParameter {
+
+ constructor() {
+ this.bottom = [];
+ this.top = [];
+ this.loss_weight = [];
+ this.param = [];
+ this.blobs = [];
+ this.propagate_down = [];
+ this.include = [];
+ this.exclude = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.LayerParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.name = reader.string();
+ break;
+ case 2:
+ message.type = reader.string();
+ break;
+ case 3:
+ message.bottom.push(reader.string());
+ break;
+ case 4:
+ message.top.push(reader.string());
+ break;
+ case 10:
+ message.phase = reader.int32();
+ break;
+ case 5:
+ message.loss_weight = reader.floats(message.loss_weight, tag);
+ break;
+ case 6:
+ message.param.push($root.caffe.ParamSpec.decode(reader, reader.uint32()));
+ break;
+ case 7:
+ message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32()));
+ break;
+ case 11:
+ message.propagate_down = reader.array(message.propagate_down, () => reader.bool(), tag);
+ break;
+ case 8:
+ message.include.push($root.caffe.NetStateRule.decode(reader, reader.uint32()));
+ break;
+ case 9:
+ message.exclude.push($root.caffe.NetStateRule.decode(reader, reader.uint32()));
+ break;
+ case 100:
+ message.transform_param = $root.caffe.TransformationParameter.decode(reader, reader.uint32());
+ break;
+ case 101:
+ message.loss_param = $root.caffe.LossParameter.decode(reader, reader.uint32());
+ break;
+ case 102:
+ message.accuracy_param = $root.caffe.AccuracyParameter.decode(reader, reader.uint32());
+ break;
+ case 103:
+ message.argmax_param = $root.caffe.ArgMaxParameter.decode(reader, reader.uint32());
+ break;
+ case 139:
+ message.batch_norm_param = $root.caffe.BatchNormParameter.decode(reader, reader.uint32());
+ break;
+ case 141:
+ message.bias_param = $root.caffe.BiasParameter.decode(reader, reader.uint32());
+ break;
+ case 148:
+ message.clip_param = $root.caffe.ClipParameter.decode(reader, reader.uint32());
+ break;
+ case 104:
+ message.concat_param = $root.caffe.ConcatParameter.decode(reader, reader.uint32());
+ break;
+ case 105:
+ message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decode(reader, reader.uint32());
+ break;
+ case 106:
+ message.convolution_param = $root.caffe.ConvolutionParameter.decode(reader, reader.uint32());
+ break;
+ case 144:
+ message.crop_param = $root.caffe.CropParameter.decode(reader, reader.uint32());
+ break;
+ case 107:
+ message.data_param = $root.caffe.DataParameter.decode(reader, reader.uint32());
+ break;
+ case 108:
+ message.dropout_param = $root.caffe.DropoutParameter.decode(reader, reader.uint32());
+ break;
+ case 109:
+ message.dummy_data_param = $root.caffe.DummyDataParameter.decode(reader, reader.uint32());
+ break;
+ case 110:
+ message.eltwise_param = $root.caffe.EltwiseParameter.decode(reader, reader.uint32());
+ break;
+ case 140:
+ message.elu_param = $root.caffe.ELUParameter.decode(reader, reader.uint32());
+ break;
+ case 137:
+ message.embed_param = $root.caffe.EmbedParameter.decode(reader, reader.uint32());
+ break;
+ case 111:
+ message.exp_param = $root.caffe.ExpParameter.decode(reader, reader.uint32());
+ break;
+ case 135:
+ message.flatten_param = $root.caffe.FlattenParameter.decode(reader, reader.uint32());
+ break;
+ case 112:
+ message.hdf5_data_param = $root.caffe.HDF5DataParameter.decode(reader, reader.uint32());
+ break;
+ case 113:
+ message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32());
+ break;
+ case 114:
+ message.hinge_loss_param = $root.caffe.HingeLossParameter.decode(reader, reader.uint32());
+ break;
+ case 115:
+ message.image_data_param = $root.caffe.ImageDataParameter.decode(reader, reader.uint32());
+ break;
+ case 116:
+ message.infogain_loss_param = $root.caffe.InfogainLossParameter.decode(reader, reader.uint32());
+ break;
+ case 117:
+ message.inner_product_param = $root.caffe.InnerProductParameter.decode(reader, reader.uint32());
+ break;
+ case 143:
+ message.input_param = $root.caffe.InputParameter.decode(reader, reader.uint32());
+ break;
+ case 134:
+ message.log_param = $root.caffe.LogParameter.decode(reader, reader.uint32());
+ break;
+ case 118:
+ message.lrn_param = $root.caffe.LRNParameter.decode(reader, reader.uint32());
+ break;
+ case 119:
+ message.memory_data_param = $root.caffe.MemoryDataParameter.decode(reader, reader.uint32());
+ break;
+ case 120:
+ message.mvn_param = $root.caffe.MVNParameter.decode(reader, reader.uint32());
+ break;
+ case 145:
+ message.parameter_param = $root.caffe.ParameterParameter.decode(reader, reader.uint32());
+ break;
+ case 121:
+ message.pooling_param = $root.caffe.PoolingParameter.decode(reader, reader.uint32());
+ break;
+ case 122:
+ message.power_param = $root.caffe.PowerParameter.decode(reader, reader.uint32());
+ break;
+ case 131:
+ message.prelu_param = $root.caffe.PReLUParameter.decode(reader, reader.uint32());
+ break;
+ case 130:
+ message.python_param = $root.caffe.PythonParameter.decode(reader, reader.uint32());
+ break;
+ case 146:
+ message.recurrent_param = $root.caffe.RecurrentParameter.decode(reader, reader.uint32());
+ break;
+ case 136:
+ message.reduction_param = $root.caffe.ReductionParameter.decode(reader, reader.uint32());
+ break;
+ case 123:
+ message.relu_param = $root.caffe.ReLUParameter.decode(reader, reader.uint32());
+ break;
+ case 133:
+ message.reshape_param = $root.caffe.ReshapeParameter.decode(reader, reader.uint32());
+ break;
+ case 142:
+ message.scale_param = $root.caffe.ScaleParameter.decode(reader, reader.uint32());
+ break;
+ case 124:
+ message.sigmoid_param = $root.caffe.SigmoidParameter.decode(reader, reader.uint32());
+ break;
+ case 125:
+ message.softmax_param = $root.caffe.SoftmaxParameter.decode(reader, reader.uint32());
+ break;
+ case 132:
+ message.spp_param = $root.caffe.SPPParameter.decode(reader, reader.uint32());
+ break;
+ case 126:
+ message.slice_param = $root.caffe.SliceParameter.decode(reader, reader.uint32());
+ break;
+ case 147:
+ message.swish_param = $root.caffe.SwishParameter.decode(reader, reader.uint32());
+ break;
+ case 127:
+ message.tanh_param = $root.caffe.TanHParameter.decode(reader, reader.uint32());
+ break;
+ case 128:
+ message.threshold_param = $root.caffe.ThresholdParameter.decode(reader, reader.uint32());
+ break;
+ case 138:
+ message.tile_param = $root.caffe.TileParameter.decode(reader, reader.uint32());
+ break;
+ case 129:
+ message.window_data_param = $root.caffe.WindowDataParameter.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.LayerParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "name":
+ message.name = reader.string();
+ break;
+ case "type":
+ message.type = reader.string();
+ break;
+ case "bottom":
+ reader.array(message.bottom, () => reader.string());
+ break;
+ case "top":
+ reader.array(message.top, () => reader.string());
+ break;
+ case "phase":
+ message.phase = reader.enum($root.caffe.Phase);
+ break;
+ case "loss_weight":
+ reader.array(message.loss_weight, () => reader.float());
+ break;
+ case "param":
+ message.param.push($root.caffe.ParamSpec.decodeText(reader));
+ break;
+ case "blobs":
+ message.blobs.push($root.caffe.BlobProto.decodeText(reader));
+ break;
+ case "propagate_down":
+ reader.array(message.propagate_down, () => reader.bool());
+ break;
+ case "include":
+ message.include.push($root.caffe.NetStateRule.decodeText(reader));
+ break;
+ case "exclude":
+ message.exclude.push($root.caffe.NetStateRule.decodeText(reader));
+ break;
+ case "transform_param":
+ message.transform_param = $root.caffe.TransformationParameter.decodeText(reader);
+ break;
+ case "loss_param":
+ message.loss_param = $root.caffe.LossParameter.decodeText(reader);
+ break;
+ case "accuracy_param":
+ message.accuracy_param = $root.caffe.AccuracyParameter.decodeText(reader);
+ break;
+ case "argmax_param":
+ message.argmax_param = $root.caffe.ArgMaxParameter.decodeText(reader);
+ break;
+ case "batch_norm_param":
+ message.batch_norm_param = $root.caffe.BatchNormParameter.decodeText(reader);
+ break;
+ case "bias_param":
+ message.bias_param = $root.caffe.BiasParameter.decodeText(reader);
+ break;
+ case "clip_param":
+ message.clip_param = $root.caffe.ClipParameter.decodeText(reader);
+ break;
+ case "concat_param":
+ message.concat_param = $root.caffe.ConcatParameter.decodeText(reader);
+ break;
+ case "contrastive_loss_param":
+ message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decodeText(reader);
+ break;
+ case "convolution_param":
+ message.convolution_param = $root.caffe.ConvolutionParameter.decodeText(reader);
+ break;
+ case "crop_param":
+ message.crop_param = $root.caffe.CropParameter.decodeText(reader);
+ break;
+ case "data_param":
+ message.data_param = $root.caffe.DataParameter.decodeText(reader);
+ break;
+ case "dropout_param":
+ message.dropout_param = $root.caffe.DropoutParameter.decodeText(reader);
+ break;
+ case "dummy_data_param":
+ message.dummy_data_param = $root.caffe.DummyDataParameter.decodeText(reader);
+ break;
+ case "eltwise_param":
+ message.eltwise_param = $root.caffe.EltwiseParameter.decodeText(reader);
+ break;
+ case "elu_param":
+ message.elu_param = $root.caffe.ELUParameter.decodeText(reader);
+ break;
+ case "embed_param":
+ message.embed_param = $root.caffe.EmbedParameter.decodeText(reader);
+ break;
+ case "exp_param":
+ message.exp_param = $root.caffe.ExpParameter.decodeText(reader);
+ break;
+ case "flatten_param":
+ message.flatten_param = $root.caffe.FlattenParameter.decodeText(reader);
+ break;
+ case "hdf5_data_param":
+ message.hdf5_data_param = $root.caffe.HDF5DataParameter.decodeText(reader);
+ break;
+ case "hdf5_output_param":
+ message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader);
+ break;
+ case "hinge_loss_param":
+ message.hinge_loss_param = $root.caffe.HingeLossParameter.decodeText(reader);
+ break;
+ case "image_data_param":
+ message.image_data_param = $root.caffe.ImageDataParameter.decodeText(reader);
+ break;
+ case "infogain_loss_param":
+ message.infogain_loss_param = $root.caffe.InfogainLossParameter.decodeText(reader);
+ break;
+ case "inner_product_param":
+ message.inner_product_param = $root.caffe.InnerProductParameter.decodeText(reader);
+ break;
+ case "input_param":
+ message.input_param = $root.caffe.InputParameter.decodeText(reader);
+ break;
+ case "log_param":
+ message.log_param = $root.caffe.LogParameter.decodeText(reader);
+ break;
+ case "lrn_param":
+ message.lrn_param = $root.caffe.LRNParameter.decodeText(reader);
+ break;
+ case "memory_data_param":
+ message.memory_data_param = $root.caffe.MemoryDataParameter.decodeText(reader);
+ break;
+ case "mvn_param":
+ message.mvn_param = $root.caffe.MVNParameter.decodeText(reader);
+ break;
+ case "parameter_param":
+ message.parameter_param = $root.caffe.ParameterParameter.decodeText(reader);
+ break;
+ case "pooling_param":
+ message.pooling_param = $root.caffe.PoolingParameter.decodeText(reader);
+ break;
+ case "power_param":
+ message.power_param = $root.caffe.PowerParameter.decodeText(reader);
+ break;
+ case "prelu_param":
+ message.prelu_param = $root.caffe.PReLUParameter.decodeText(reader);
+ break;
+ case "python_param":
+ message.python_param = $root.caffe.PythonParameter.decodeText(reader);
+ break;
+ case "recurrent_param":
+ message.recurrent_param = $root.caffe.RecurrentParameter.decodeText(reader);
+ break;
+ case "reduction_param":
+ message.reduction_param = $root.caffe.ReductionParameter.decodeText(reader);
+ break;
+ case "relu_param":
+ message.relu_param = $root.caffe.ReLUParameter.decodeText(reader);
+ break;
+ case "reshape_param":
+ message.reshape_param = $root.caffe.ReshapeParameter.decodeText(reader);
+ break;
+ case "scale_param":
+ message.scale_param = $root.caffe.ScaleParameter.decodeText(reader);
+ break;
+ case "sigmoid_param":
+ message.sigmoid_param = $root.caffe.SigmoidParameter.decodeText(reader);
+ break;
+ case "softmax_param":
+ message.softmax_param = $root.caffe.SoftmaxParameter.decodeText(reader);
+ break;
+ case "spp_param":
+ message.spp_param = $root.caffe.SPPParameter.decodeText(reader);
+ break;
+ case "slice_param":
+ message.slice_param = $root.caffe.SliceParameter.decodeText(reader);
+ break;
+ case "swish_param":
+ message.swish_param = $root.caffe.SwishParameter.decodeText(reader);
+ break;
+ case "tanh_param":
+ message.tanh_param = $root.caffe.TanHParameter.decodeText(reader);
+ break;
+ case "threshold_param":
+ message.threshold_param = $root.caffe.ThresholdParameter.decodeText(reader);
+ break;
+ case "tile_param":
+ message.tile_param = $root.caffe.TileParameter.decodeText(reader);
+ break;
+ case "window_data_param":
+ message.window_data_param = $root.caffe.WindowDataParameter.decodeText(reader);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.LayerParameter.prototype.name = "";
+$root.caffe.LayerParameter.prototype.type = "";
+$root.caffe.LayerParameter.prototype.phase = 0;
+$root.caffe.LayerParameter.prototype.transform_param = null;
+$root.caffe.LayerParameter.prototype.loss_param = null;
+$root.caffe.LayerParameter.prototype.accuracy_param = null;
+$root.caffe.LayerParameter.prototype.argmax_param = null;
+$root.caffe.LayerParameter.prototype.batch_norm_param = null;
+$root.caffe.LayerParameter.prototype.bias_param = null;
+$root.caffe.LayerParameter.prototype.clip_param = null;
+$root.caffe.LayerParameter.prototype.concat_param = null;
+$root.caffe.LayerParameter.prototype.contrastive_loss_param = null;
+$root.caffe.LayerParameter.prototype.convolution_param = null;
+$root.caffe.LayerParameter.prototype.crop_param = null;
+$root.caffe.LayerParameter.prototype.data_param = null;
+$root.caffe.LayerParameter.prototype.dropout_param = null;
+$root.caffe.LayerParameter.prototype.dummy_data_param = null;
+$root.caffe.LayerParameter.prototype.eltwise_param = null;
+$root.caffe.LayerParameter.prototype.elu_param = null;
+$root.caffe.LayerParameter.prototype.embed_param = null;
+$root.caffe.LayerParameter.prototype.exp_param = null;
+$root.caffe.LayerParameter.prototype.flatten_param = null;
+$root.caffe.LayerParameter.prototype.hdf5_data_param = null;
+$root.caffe.LayerParameter.prototype.hdf5_output_param = null;
+$root.caffe.LayerParameter.prototype.hinge_loss_param = null;
+$root.caffe.LayerParameter.prototype.image_data_param = null;
+$root.caffe.LayerParameter.prototype.infogain_loss_param = null;
+$root.caffe.LayerParameter.prototype.inner_product_param = null;
+$root.caffe.LayerParameter.prototype.input_param = null;
+$root.caffe.LayerParameter.prototype.log_param = null;
+$root.caffe.LayerParameter.prototype.lrn_param = null;
+$root.caffe.LayerParameter.prototype.memory_data_param = null;
+$root.caffe.LayerParameter.prototype.mvn_param = null;
+$root.caffe.LayerParameter.prototype.parameter_param = null;
+$root.caffe.LayerParameter.prototype.pooling_param = null;
+$root.caffe.LayerParameter.prototype.power_param = null;
+$root.caffe.LayerParameter.prototype.prelu_param = null;
+$root.caffe.LayerParameter.prototype.python_param = null;
+$root.caffe.LayerParameter.prototype.recurrent_param = null;
+$root.caffe.LayerParameter.prototype.reduction_param = null;
+$root.caffe.LayerParameter.prototype.relu_param = null;
+$root.caffe.LayerParameter.prototype.reshape_param = null;
+$root.caffe.LayerParameter.prototype.scale_param = null;
+$root.caffe.LayerParameter.prototype.sigmoid_param = null;
+$root.caffe.LayerParameter.prototype.softmax_param = null;
+$root.caffe.LayerParameter.prototype.spp_param = null;
+$root.caffe.LayerParameter.prototype.slice_param = null;
+$root.caffe.LayerParameter.prototype.swish_param = null;
+$root.caffe.LayerParameter.prototype.tanh_param = null;
+$root.caffe.LayerParameter.prototype.threshold_param = null;
+$root.caffe.LayerParameter.prototype.tile_param = null;
+$root.caffe.LayerParameter.prototype.window_data_param = null;
+
+$root.caffe.TransformationParameter = class TransformationParameter {
+
+ constructor() {
+ this.mean_value = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.TransformationParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.scale = reader.float();
+ break;
+ case 2:
+ message.mirror = reader.bool();
+ break;
+ case 3:
+ message.crop_size = reader.uint32();
+ break;
+ case 4:
+ message.mean_file = reader.string();
+ break;
+ case 5:
+ message.mean_value = reader.floats(message.mean_value, tag);
+ break;
+ case 6:
+ message.force_color = reader.bool();
+ break;
+ case 7:
+ message.force_gray = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.TransformationParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "scale":
+ message.scale = reader.float();
+ break;
+ case "mirror":
+ message.mirror = reader.bool();
+ break;
+ case "crop_size":
+ message.crop_size = reader.uint32();
+ break;
+ case "mean_file":
+ message.mean_file = reader.string();
+ break;
+ case "mean_value":
+ reader.array(message.mean_value, () => reader.float());
+ break;
+ case "force_color":
+ message.force_color = reader.bool();
+ break;
+ case "force_gray":
+ message.force_gray = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
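+// Field defaults live on the prototype rather than the instance, so a decoded
+// message only owns the fields actually present in the input; absent fields
+// fall back to these shared proto2-style defaults.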
+$root.caffe.TransformationParameter.prototype.scale = 1;
+$root.caffe.TransformationParameter.prototype.mirror = false;
+$root.caffe.TransformationParameter.prototype.crop_size = 0;
+$root.caffe.TransformationParameter.prototype.mean_file = "";
+$root.caffe.TransformationParameter.prototype.force_color = false;
+$root.caffe.TransformationParameter.prototype.force_gray = false;
+
+$root.caffe.LossParameter = class LossParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.LossParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.ignore_label = reader.int32();
+ break;
+ case 3:
+ message.normalization = reader.int32();
+ break;
+ case 2:
+ message.normalize = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.LossParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "ignore_label":
+ message.ignore_label = reader.int32();
+ break;
+ case "normalization":
+ message.normalization = reader.enum($root.caffe.LossParameter.NormalizationMode);
+ break;
+ case "normalize":
+ message.normalize = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.LossParameter.prototype.ignore_label = 0;
+$root.caffe.LossParameter.prototype.normalization = 1;
+$root.caffe.LossParameter.prototype.normalize = false;
+
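+// Enums are plain name-to-value maps. The text decoder resolves symbolic
+// names through `reader.enum`, while the binary decoder above stores the raw
+// varint directly (see `normalization` read as `int32` in `decode`).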
+$root.caffe.LossParameter.NormalizationMode = {
+ "FULL": 0,
+ "VALID": 1,
+ "BATCH_SIZE": 2,
+ "NONE": 3
+};
+
+$root.caffe.AccuracyParameter = class AccuracyParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.AccuracyParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.top_k = reader.uint32();
+ break;
+ case 2:
+ message.axis = reader.int32();
+ break;
+ case 3:
+ message.ignore_label = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.AccuracyParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "top_k":
+ message.top_k = reader.uint32();
+ break;
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "ignore_label":
+ message.ignore_label = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.AccuracyParameter.prototype.top_k = 1;
+$root.caffe.AccuracyParameter.prototype.axis = 1;
+$root.caffe.AccuracyParameter.prototype.ignore_label = 0;
+
+$root.caffe.ArgMaxParameter = class ArgMaxParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ArgMaxParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.out_max_val = reader.bool();
+ break;
+ case 2:
+ message.top_k = reader.uint32();
+ break;
+ case 3:
+ message.axis = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ArgMaxParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "out_max_val":
+ message.out_max_val = reader.bool();
+ break;
+ case "top_k":
+ message.top_k = reader.uint32();
+ break;
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ArgMaxParameter.prototype.out_max_val = false;
+$root.caffe.ArgMaxParameter.prototype.top_k = 1;
+$root.caffe.ArgMaxParameter.prototype.axis = 0;
+
+$root.caffe.ClipParameter = class ClipParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ClipParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.min = reader.float();
+ break;
+ case 2:
+ message.max = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ClipParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "min":
+ message.min = reader.float();
+ break;
+ case "max":
+ message.max = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ClipParameter.prototype.min = 0;
+$root.caffe.ClipParameter.prototype.max = 0;
+
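+// ConcatParameter carries both `axis` (field 2) and the older `concat_dim`
+// (field 1); caffe.proto keeps the latter as a deprecated alias, and the
+// decoder preserves both so legacy prototxt files still parse.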
+$root.caffe.ConcatParameter = class ConcatParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ConcatParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 2:
+ message.axis = reader.int32();
+ break;
+ case 1:
+ message.concat_dim = reader.uint32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ConcatParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "concat_dim":
+ message.concat_dim = reader.uint32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ConcatParameter.prototype.axis = 1;
+$root.caffe.ConcatParameter.prototype.concat_dim = 1;
+
+$root.caffe.BatchNormParameter = class BatchNormParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.BatchNormParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.use_global_stats = reader.bool();
+ break;
+ case 2:
+ message.moving_average_fraction = reader.float();
+ break;
+ case 3:
+ message.eps = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.BatchNormParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "use_global_stats":
+ message.use_global_stats = reader.bool();
+ break;
+ case "moving_average_fraction":
+ message.moving_average_fraction = reader.float();
+ break;
+ case "eps":
+ message.eps = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.BatchNormParameter.prototype.use_global_stats = false;
+$root.caffe.BatchNormParameter.prototype.moving_average_fraction = 0.999;
+$root.caffe.BatchNormParameter.prototype.eps = 0.00001;
+
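+// Nested message fields (here the FillerParameter at field 3) are
+// length-delimited on the wire: the inner `reader.uint32()` consumes the
+// length prefix before handing the payload to the nested `decode`.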
+$root.caffe.BiasParameter = class BiasParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.BiasParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.axis = reader.int32();
+ break;
+ case 2:
+ message.num_axes = reader.int32();
+ break;
+ case 3:
+ message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.BiasParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "num_axes":
+ message.num_axes = reader.int32();
+ break;
+ case "filler":
+ message.filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.BiasParameter.prototype.axis = 1;
+$root.caffe.BiasParameter.prototype.num_axes = 1;
+$root.caffe.BiasParameter.prototype.filler = null;
+
+$root.caffe.ContrastiveLossParameter = class ContrastiveLossParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ContrastiveLossParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.margin = reader.float();
+ break;
+ case 2:
+ message.legacy_version = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ContrastiveLossParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "margin":
+ message.margin = reader.float();
+ break;
+ case "legacy_version":
+ message.legacy_version = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ContrastiveLossParameter.prototype.margin = 1;
+$root.caffe.ContrastiveLossParameter.prototype.legacy_version = false;
+
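+// Repeated scalar fields (pad, kernel_size, stride, dilation) start as empty
+// arrays in the constructor. The binary decoder passes the tag into
+// `reader.array`, presumably so the reader can tell packed from unpacked
+// encodings by wire type; the text decoder appends one value per occurrence.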
+$root.caffe.ConvolutionParameter = class ConvolutionParameter {
+
+ constructor() {
+ this.pad = [];
+ this.kernel_size = [];
+ this.stride = [];
+ this.dilation = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ConvolutionParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.num_output = reader.uint32();
+ break;
+ case 2:
+ message.bias_term = reader.bool();
+ break;
+ case 3:
+ message.pad = reader.array(message.pad, () => reader.uint32(), tag);
+ break;
+ case 4:
+ message.kernel_size = reader.array(message.kernel_size, () => reader.uint32(), tag);
+ break;
+ case 6:
+ message.stride = reader.array(message.stride, () => reader.uint32(), tag);
+ break;
+ case 18:
+ message.dilation = reader.array(message.dilation, () => reader.uint32(), tag);
+ break;
+ case 9:
+ message.pad_h = reader.uint32();
+ break;
+ case 10:
+ message.pad_w = reader.uint32();
+ break;
+ case 11:
+ message.kernel_h = reader.uint32();
+ break;
+ case 12:
+ message.kernel_w = reader.uint32();
+ break;
+ case 13:
+ message.stride_h = reader.uint32();
+ break;
+ case 14:
+ message.stride_w = reader.uint32();
+ break;
+ case 5:
+ message.group = reader.uint32();
+ break;
+ case 7:
+ message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 8:
+ message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 15:
+ message.engine = reader.int32();
+ break;
+ case 16:
+ message.axis = reader.int32();
+ break;
+ case 17:
+ message.force_nd_im2col = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ConvolutionParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "num_output":
+ message.num_output = reader.uint32();
+ break;
+ case "bias_term":
+ message.bias_term = reader.bool();
+ break;
+ case "pad":
+ reader.array(message.pad, () => reader.uint32());
+ break;
+ case "kernel_size":
+ reader.array(message.kernel_size, () => reader.uint32());
+ break;
+ case "stride":
+ reader.array(message.stride, () => reader.uint32());
+ break;
+ case "dilation":
+ reader.array(message.dilation, () => reader.uint32());
+ break;
+ case "pad_h":
+ message.pad_h = reader.uint32();
+ break;
+ case "pad_w":
+ message.pad_w = reader.uint32();
+ break;
+ case "kernel_h":
+ message.kernel_h = reader.uint32();
+ break;
+ case "kernel_w":
+ message.kernel_w = reader.uint32();
+ break;
+ case "stride_h":
+ message.stride_h = reader.uint32();
+ break;
+ case "stride_w":
+ message.stride_w = reader.uint32();
+ break;
+ case "group":
+ message.group = reader.uint32();
+ break;
+ case "weight_filler":
+ message.weight_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "bias_filler":
+ message.bias_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "engine":
+ message.engine = reader.enum($root.caffe.ConvolutionParameter.Engine);
+ break;
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "force_nd_im2col":
+ message.force_nd_im2col = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ConvolutionParameter.prototype.num_output = 0;
+$root.caffe.ConvolutionParameter.prototype.bias_term = true;
+$root.caffe.ConvolutionParameter.prototype.pad_h = 0;
+$root.caffe.ConvolutionParameter.prototype.pad_w = 0;
+$root.caffe.ConvolutionParameter.prototype.kernel_h = 0;
+$root.caffe.ConvolutionParameter.prototype.kernel_w = 0;
+$root.caffe.ConvolutionParameter.prototype.stride_h = 0;
+$root.caffe.ConvolutionParameter.prototype.stride_w = 0;
+$root.caffe.ConvolutionParameter.prototype.group = 1;
+$root.caffe.ConvolutionParameter.prototype.weight_filler = null;
+$root.caffe.ConvolutionParameter.prototype.bias_filler = null;
+$root.caffe.ConvolutionParameter.prototype.engine = 0;
+$root.caffe.ConvolutionParameter.prototype.axis = 1;
+$root.caffe.ConvolutionParameter.prototype.force_nd_im2col = false;
+
+$root.caffe.ConvolutionParameter.Engine = {
+ "DEFAULT": 0,
+ "CAFFE": 1,
+ "CUDNN": 2
+};
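+
+// Usage sketch (illustrative only; how the binary reader is constructed is
+// defined elsewhere in this file set and assumed here): given `reader`
+// positioned at a length-delimited ConvolutionParameter payload,
+//
+//   const conv = $root.caffe.ConvolutionParameter.decode(reader, reader.uint32());
+//   const kh = conv.kernel_h || (conv.kernel_size.length ? conv.kernel_size[0] : 0);
+//
+// fields never seen on the wire resolve to the prototype defaults above.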
+
+$root.caffe.CropParameter = class CropParameter {
+
+ constructor() {
+ this.offset = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.CropParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.axis = reader.int32();
+ break;
+ case 2:
+ message.offset = reader.array(message.offset, () => reader.uint32(), tag);
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.CropParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "offset":
+ reader.array(message.offset, () => reader.uint32());
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.CropParameter.prototype.axis = 2;
+
+$root.caffe.DataParameter = class DataParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.DataParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.source = reader.string();
+ break;
+ case 4:
+ message.batch_size = reader.uint32();
+ break;
+ case 7:
+ message.rand_skip = reader.uint32();
+ break;
+ case 8:
+ message.backend = reader.int32();
+ break;
+ case 2:
+ message.scale = reader.float();
+ break;
+ case 3:
+ message.mean_file = reader.string();
+ break;
+ case 5:
+ message.crop_size = reader.uint32();
+ break;
+ case 6:
+ message.mirror = reader.bool();
+ break;
+ case 9:
+ message.force_encoded_color = reader.bool();
+ break;
+ case 10:
+ message.prefetch = reader.uint32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.DataParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "source":
+ message.source = reader.string();
+ break;
+ case "batch_size":
+ message.batch_size = reader.uint32();
+ break;
+ case "rand_skip":
+ message.rand_skip = reader.uint32();
+ break;
+ case "backend":
+ message.backend = reader.enum($root.caffe.DataParameter.DB);
+ break;
+ case "scale":
+ message.scale = reader.float();
+ break;
+ case "mean_file":
+ message.mean_file = reader.string();
+ break;
+ case "crop_size":
+ message.crop_size = reader.uint32();
+ break;
+ case "mirror":
+ message.mirror = reader.bool();
+ break;
+ case "force_encoded_color":
+ message.force_encoded_color = reader.bool();
+ break;
+ case "prefetch":
+ message.prefetch = reader.uint32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.DataParameter.prototype.source = "";
+$root.caffe.DataParameter.prototype.batch_size = 0;
+$root.caffe.DataParameter.prototype.rand_skip = 0;
+$root.caffe.DataParameter.prototype.backend = 0;
+$root.caffe.DataParameter.prototype.scale = 1;
+$root.caffe.DataParameter.prototype.mean_file = "";
+$root.caffe.DataParameter.prototype.crop_size = 0;
+$root.caffe.DataParameter.prototype.mirror = false;
+$root.caffe.DataParameter.prototype.force_encoded_color = false;
+$root.caffe.DataParameter.prototype.prefetch = 4;
+
+$root.caffe.DataParameter.DB = {
+ "LEVELDB": 0,
+ "LMDB": 1
+};
+
+$root.caffe.DropoutParameter = class DropoutParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.DropoutParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.dropout_ratio = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.DropoutParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "dropout_ratio":
+ message.dropout_ratio = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.DropoutParameter.prototype.dropout_ratio = 0.5;
+
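+// DummyDataParameter accepts two shape conventions: repeated BlobShape
+// messages (`shape`) and the older per-dimension lists (num, channels,
+// height, width), which caffe.proto retains for legacy 4-D blobs.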
+$root.caffe.DummyDataParameter = class DummyDataParameter {
+
+ constructor() {
+ this.data_filler = [];
+ this.shape = [];
+ this.num = [];
+ this.channels = [];
+ this.height = [];
+ this.width = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.DummyDataParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.data_filler.push($root.caffe.FillerParameter.decode(reader, reader.uint32()));
+ break;
+ case 6:
+ message.shape.push($root.caffe.BlobShape.decode(reader, reader.uint32()));
+ break;
+ case 2:
+ message.num = reader.array(message.num, () => reader.uint32(), tag);
+ break;
+ case 3:
+ message.channels = reader.array(message.channels, () => reader.uint32(), tag);
+ break;
+ case 4:
+ message.height = reader.array(message.height, () => reader.uint32(), tag);
+ break;
+ case 5:
+ message.width = reader.array(message.width, () => reader.uint32(), tag);
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.DummyDataParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "data_filler":
+ message.data_filler.push($root.caffe.FillerParameter.decodeText(reader));
+ break;
+ case "shape":
+ message.shape.push($root.caffe.BlobShape.decodeText(reader));
+ break;
+ case "num":
+ reader.array(message.num, () => reader.uint32());
+ break;
+ case "channels":
+ reader.array(message.channels, () => reader.uint32());
+ break;
+ case "height":
+ reader.array(message.height, () => reader.uint32());
+ break;
+ case "width":
+ reader.array(message.width, () => reader.uint32());
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.EltwiseParameter = class EltwiseParameter {
+
+ constructor() {
+ this.coeff = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.EltwiseParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.operation = reader.int32();
+ break;
+ case 2:
+ message.coeff = reader.floats(message.coeff, tag);
+ break;
+ case 3:
+ message.stable_prod_grad = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.EltwiseParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "operation":
+ message.operation = reader.enum($root.caffe.EltwiseParameter.EltwiseOp);
+ break;
+ case "coeff":
+ reader.array(message.coeff, () => reader.float());
+ break;
+ case "stable_prod_grad":
+ message.stable_prod_grad = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.EltwiseParameter.prototype.operation = 1;
+$root.caffe.EltwiseParameter.prototype.stable_prod_grad = true;
+
+$root.caffe.EltwiseParameter.EltwiseOp = {
+ "PROD": 0,
+ "SUM": 1,
+ "MAX": 2
+};
+
+$root.caffe.ELUParameter = class ELUParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ELUParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.alpha = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ELUParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "alpha":
+ message.alpha = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ELUParameter.prototype.alpha = 1;
+
+$root.caffe.EmbedParameter = class EmbedParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.EmbedParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.num_output = reader.uint32();
+ break;
+ case 2:
+ message.input_dim = reader.uint32();
+ break;
+ case 3:
+ message.bias_term = reader.bool();
+ break;
+ case 4:
+ message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 5:
+ message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.EmbedParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "num_output":
+ message.num_output = reader.uint32();
+ break;
+ case "input_dim":
+ message.input_dim = reader.uint32();
+ break;
+ case "bias_term":
+ message.bias_term = reader.bool();
+ break;
+ case "weight_filler":
+ message.weight_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "bias_filler":
+ message.bias_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.EmbedParameter.prototype.num_output = 0;
+$root.caffe.EmbedParameter.prototype.input_dim = 0;
+$root.caffe.EmbedParameter.prototype.bias_term = true;
+$root.caffe.EmbedParameter.prototype.weight_filler = null;
+$root.caffe.EmbedParameter.prototype.bias_filler = null;
+
+$root.caffe.ExpParameter = class ExpParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ExpParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.base = reader.float();
+ break;
+ case 2:
+ message.scale = reader.float();
+ break;
+ case 3:
+ message.shift = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ExpParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "base":
+ message.base = reader.float();
+ break;
+ case "scale":
+ message.scale = reader.float();
+ break;
+ case "shift":
+ message.shift = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ExpParameter.prototype.base = -1;
+$root.caffe.ExpParameter.prototype.scale = 1;
+$root.caffe.ExpParameter.prototype.shift = 0;
+
+$root.caffe.FlattenParameter = class FlattenParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.FlattenParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.axis = reader.int32();
+ break;
+ case 2:
+ message.end_axis = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.FlattenParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "end_axis":
+ message.end_axis = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.FlattenParameter.prototype.axis = 1;
+$root.caffe.FlattenParameter.prototype.end_axis = -1;
+
+$root.caffe.HDF5DataParameter = class HDF5DataParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.HDF5DataParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.source = reader.string();
+ break;
+ case 2:
+ message.batch_size = reader.uint32();
+ break;
+ case 3:
+ message.shuffle = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.HDF5DataParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "source":
+ message.source = reader.string();
+ break;
+ case "batch_size":
+ message.batch_size = reader.uint32();
+ break;
+ case "shuffle":
+ message.shuffle = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.HDF5DataParameter.prototype.source = "";
+$root.caffe.HDF5DataParameter.prototype.batch_size = 0;
+$root.caffe.HDF5DataParameter.prototype.shuffle = false;
+
+$root.caffe.HDF5OutputParameter = class HDF5OutputParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.HDF5OutputParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.file_name = reader.string();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.HDF5OutputParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "file_name":
+ message.file_name = reader.string();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.HDF5OutputParameter.prototype.file_name = "";
+
+$root.caffe.HingeLossParameter = class HingeLossParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.HingeLossParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.norm = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.HingeLossParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "norm":
+ message.norm = reader.enum($root.caffe.HingeLossParameter.Norm);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.HingeLossParameter.prototype.norm = 1;
+
+$root.caffe.HingeLossParameter.Norm = {
+ "L1": 1,
+ "L2": 2
+};
+
+$root.caffe.ImageDataParameter = class ImageDataParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ImageDataParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.source = reader.string();
+ break;
+ case 4:
+ message.batch_size = reader.uint32();
+ break;
+ case 7:
+ message.rand_skip = reader.uint32();
+ break;
+ case 8:
+ message.shuffle = reader.bool();
+ break;
+ case 9:
+ message.new_height = reader.uint32();
+ break;
+ case 10:
+ message.new_width = reader.uint32();
+ break;
+ case 11:
+ message.is_color = reader.bool();
+ break;
+ case 2:
+ message.scale = reader.float();
+ break;
+ case 3:
+ message.mean_file = reader.string();
+ break;
+ case 5:
+ message.crop_size = reader.uint32();
+ break;
+ case 6:
+ message.mirror = reader.bool();
+ break;
+ case 12:
+ message.root_folder = reader.string();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ImageDataParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "source":
+ message.source = reader.string();
+ break;
+ case "batch_size":
+ message.batch_size = reader.uint32();
+ break;
+ case "rand_skip":
+ message.rand_skip = reader.uint32();
+ break;
+ case "shuffle":
+ message.shuffle = reader.bool();
+ break;
+ case "new_height":
+ message.new_height = reader.uint32();
+ break;
+ case "new_width":
+ message.new_width = reader.uint32();
+ break;
+ case "is_color":
+ message.is_color = reader.bool();
+ break;
+ case "scale":
+ message.scale = reader.float();
+ break;
+ case "mean_file":
+ message.mean_file = reader.string();
+ break;
+ case "crop_size":
+ message.crop_size = reader.uint32();
+ break;
+ case "mirror":
+ message.mirror = reader.bool();
+ break;
+ case "root_folder":
+ message.root_folder = reader.string();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ImageDataParameter.prototype.source = "";
+$root.caffe.ImageDataParameter.prototype.batch_size = 1;
+$root.caffe.ImageDataParameter.prototype.rand_skip = 0;
+$root.caffe.ImageDataParameter.prototype.shuffle = false;
+$root.caffe.ImageDataParameter.prototype.new_height = 0;
+$root.caffe.ImageDataParameter.prototype.new_width = 0;
+$root.caffe.ImageDataParameter.prototype.is_color = true;
+$root.caffe.ImageDataParameter.prototype.scale = 1;
+$root.caffe.ImageDataParameter.prototype.mean_file = "";
+$root.caffe.ImageDataParameter.prototype.crop_size = 0;
+$root.caffe.ImageDataParameter.prototype.mirror = false;
+$root.caffe.ImageDataParameter.prototype.root_folder = "";
+
+$root.caffe.InfogainLossParameter = class InfogainLossParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.InfogainLossParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.source = reader.string();
+ break;
+ case 2:
+ message.axis = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.InfogainLossParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "source":
+ message.source = reader.string();
+ break;
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.InfogainLossParameter.prototype.source = "";
+$root.caffe.InfogainLossParameter.prototype.axis = 1;
+
+$root.caffe.InnerProductParameter = class InnerProductParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.InnerProductParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.num_output = reader.uint32();
+ break;
+ case 2:
+ message.bias_term = reader.bool();
+ break;
+ case 3:
+ message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 4:
+ message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 5:
+ message.axis = reader.int32();
+ break;
+ case 6:
+ message.transpose = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.InnerProductParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "num_output":
+ message.num_output = reader.uint32();
+ break;
+ case "bias_term":
+ message.bias_term = reader.bool();
+ break;
+ case "weight_filler":
+ message.weight_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "bias_filler":
+ message.bias_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "transpose":
+ message.transpose = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.InnerProductParameter.prototype.num_output = 0;
+$root.caffe.InnerProductParameter.prototype.bias_term = true;
+$root.caffe.InnerProductParameter.prototype.weight_filler = null;
+$root.caffe.InnerProductParameter.prototype.bias_filler = null;
+$root.caffe.InnerProductParameter.prototype.axis = 1;
+$root.caffe.InnerProductParameter.prototype.transpose = false;
+
+$root.caffe.InputParameter = class InputParameter {
+
+ constructor() {
+ this.shape = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.InputParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.shape.push($root.caffe.BlobShape.decode(reader, reader.uint32()));
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.InputParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "shape":
+ message.shape.push($root.caffe.BlobShape.decodeText(reader));
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.LogParameter = class LogParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.LogParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.base = reader.float();
+ break;
+ case 2:
+ message.scale = reader.float();
+ break;
+ case 3:
+ message.shift = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.LogParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "base":
+ message.base = reader.float();
+ break;
+ case "scale":
+ message.scale = reader.float();
+ break;
+ case "shift":
+ message.shift = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.LogParameter.prototype.base = -1;
+$root.caffe.LogParameter.prototype.scale = 1;
+$root.caffe.LogParameter.prototype.shift = 0;
+
+$root.caffe.LRNParameter = class LRNParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.LRNParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.local_size = reader.uint32();
+ break;
+ case 2:
+ message.alpha = reader.float();
+ break;
+ case 3:
+ message.beta = reader.float();
+ break;
+ case 4:
+ message.norm_region = reader.int32();
+ break;
+ case 5:
+ message.k = reader.float();
+ break;
+ case 6:
+ message.engine = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.LRNParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "local_size":
+ message.local_size = reader.uint32();
+ break;
+ case "alpha":
+ message.alpha = reader.float();
+ break;
+ case "beta":
+ message.beta = reader.float();
+ break;
+ case "norm_region":
+ message.norm_region = reader.enum($root.caffe.LRNParameter.NormRegion);
+ break;
+ case "k":
+ message.k = reader.float();
+ break;
+ case "engine":
+ message.engine = reader.enum($root.caffe.LRNParameter.Engine);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.LRNParameter.prototype.local_size = 5;
+$root.caffe.LRNParameter.prototype.alpha = 1;
+$root.caffe.LRNParameter.prototype.beta = 0.75;
+$root.caffe.LRNParameter.prototype.norm_region = 0;
+$root.caffe.LRNParameter.prototype.k = 1;
+$root.caffe.LRNParameter.prototype.engine = 0;
+
+$root.caffe.LRNParameter.NormRegion = {
+ "ACROSS_CHANNELS": 0,
+ "WITHIN_CHANNEL": 1
+};
+
+$root.caffe.LRNParameter.Engine = {
+ "DEFAULT": 0,
+ "CAFFE": 1,
+ "CUDNN": 2
+};
+
+$root.caffe.MemoryDataParameter = class MemoryDataParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.MemoryDataParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.batch_size = reader.uint32();
+ break;
+ case 2:
+ message.channels = reader.uint32();
+ break;
+ case 3:
+ message.height = reader.uint32();
+ break;
+ case 4:
+ message.width = reader.uint32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.MemoryDataParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "batch_size":
+ message.batch_size = reader.uint32();
+ break;
+ case "channels":
+ message.channels = reader.uint32();
+ break;
+ case "height":
+ message.height = reader.uint32();
+ break;
+ case "width":
+ message.width = reader.uint32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.MemoryDataParameter.prototype.batch_size = 0;
+$root.caffe.MemoryDataParameter.prototype.channels = 0;
+$root.caffe.MemoryDataParameter.prototype.height = 0;
+$root.caffe.MemoryDataParameter.prototype.width = 0;
+
+$root.caffe.MVNParameter = class MVNParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.MVNParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.normalize_variance = reader.bool();
+ break;
+ case 2:
+ message.across_channels = reader.bool();
+ break;
+ case 3:
+ message.eps = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.MVNParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "normalize_variance":
+ message.normalize_variance = reader.bool();
+ break;
+ case "across_channels":
+ message.across_channels = reader.bool();
+ break;
+ case "eps":
+ message.eps = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.MVNParameter.prototype.normalize_variance = true;
+$root.caffe.MVNParameter.prototype.across_channels = false;
+$root.caffe.MVNParameter.prototype.eps = 1e-9;
+
+$root.caffe.ParameterParameter = class ParameterParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ParameterParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ParameterParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "shape":
+ message.shape = $root.caffe.BlobShape.decodeText(reader);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ParameterParameter.prototype.shape = null;
+
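+// PoolingParameter, like ConvolutionParameter above, accepts both the square
+// shorthand (kernel_size, stride, pad) and the explicit rectangular fields
+// (kernel_h/kernel_w, stride_h/stride_w, pad_h/pad_w).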
+$root.caffe.PoolingParameter = class PoolingParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.PoolingParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.pool = reader.int32();
+ break;
+ case 4:
+ message.pad = reader.uint32();
+ break;
+ case 9:
+ message.pad_h = reader.uint32();
+ break;
+ case 10:
+ message.pad_w = reader.uint32();
+ break;
+ case 2:
+ message.kernel_size = reader.uint32();
+ break;
+ case 5:
+ message.kernel_h = reader.uint32();
+ break;
+ case 6:
+ message.kernel_w = reader.uint32();
+ break;
+ case 3:
+ message.stride = reader.uint32();
+ break;
+ case 7:
+ message.stride_h = reader.uint32();
+ break;
+ case 8:
+ message.stride_w = reader.uint32();
+ break;
+ case 11:
+ message.engine = reader.int32();
+ break;
+ case 12:
+ message.global_pooling = reader.bool();
+ break;
+ case 13:
+ message.round_mode = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.PoolingParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "pool":
+ message.pool = reader.enum($root.caffe.PoolingParameter.PoolMethod);
+ break;
+ case "pad":
+ message.pad = reader.uint32();
+ break;
+ case "pad_h":
+ message.pad_h = reader.uint32();
+ break;
+ case "pad_w":
+ message.pad_w = reader.uint32();
+ break;
+ case "kernel_size":
+ message.kernel_size = reader.uint32();
+ break;
+ case "kernel_h":
+ message.kernel_h = reader.uint32();
+ break;
+ case "kernel_w":
+ message.kernel_w = reader.uint32();
+ break;
+ case "stride":
+ message.stride = reader.uint32();
+ break;
+ case "stride_h":
+ message.stride_h = reader.uint32();
+ break;
+ case "stride_w":
+ message.stride_w = reader.uint32();
+ break;
+ case "engine":
+ message.engine = reader.enum($root.caffe.PoolingParameter.Engine);
+ break;
+ case "global_pooling":
+ message.global_pooling = reader.bool();
+ break;
+ case "round_mode":
+ message.round_mode = reader.enum($root.caffe.PoolingParameter.RoundMode);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.PoolingParameter.prototype.pool = 0;
+$root.caffe.PoolingParameter.prototype.pad = 0;
+$root.caffe.PoolingParameter.prototype.pad_h = 0;
+$root.caffe.PoolingParameter.prototype.pad_w = 0;
+$root.caffe.PoolingParameter.prototype.kernel_size = 0;
+$root.caffe.PoolingParameter.prototype.kernel_h = 0;
+$root.caffe.PoolingParameter.prototype.kernel_w = 0;
+$root.caffe.PoolingParameter.prototype.stride = 1;
+$root.caffe.PoolingParameter.prototype.stride_h = 0;
+$root.caffe.PoolingParameter.prototype.stride_w = 0;
+$root.caffe.PoolingParameter.prototype.engine = 0;
+$root.caffe.PoolingParameter.prototype.global_pooling = false;
+$root.caffe.PoolingParameter.prototype.round_mode = 0;
+
+$root.caffe.PoolingParameter.PoolMethod = {
+ "MAX": 0,
+ "AVE": 1,
+ "STOCHASTIC": 2
+};
+
+$root.caffe.PoolingParameter.Engine = {
+ "DEFAULT": 0,
+ "CAFFE": 1,
+ "CUDNN": 2
+};
+
+$root.caffe.PoolingParameter.RoundMode = {
+ "CEIL": 0,
+ "FLOOR": 1
+};
+
+$root.caffe.PowerParameter = class PowerParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.PowerParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.power = reader.float();
+ break;
+ case 2:
+ message.scale = reader.float();
+ break;
+ case 3:
+ message.shift = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.PowerParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "power":
+ message.power = reader.float();
+ break;
+ case "scale":
+ message.scale = reader.float();
+ break;
+ case "shift":
+ message.shift = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.PowerParameter.prototype.power = 1;
+$root.caffe.PowerParameter.prototype.scale = 1;
+$root.caffe.PowerParameter.prototype.shift = 0;
+
+$root.caffe.PythonParameter = class PythonParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.PythonParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.module = reader.string();
+ break;
+ case 2:
+ message.layer = reader.string();
+ break;
+ case 3:
+ message.param_str = reader.string();
+ break;
+ case 4:
+ message.share_in_parallel = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.PythonParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "module":
+ message.module = reader.string();
+ break;
+ case "layer":
+ message.layer = reader.string();
+ break;
+ case "param_str":
+ message.param_str = reader.string();
+ break;
+ case "share_in_parallel":
+ message.share_in_parallel = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.PythonParameter.prototype.module = "";
+$root.caffe.PythonParameter.prototype.layer = "";
+$root.caffe.PythonParameter.prototype.param_str = "";
+$root.caffe.PythonParameter.prototype.share_in_parallel = false;
+
+$root.caffe.RecurrentParameter = class RecurrentParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.RecurrentParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.num_output = reader.uint32();
+ break;
+ case 2:
+ message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 3:
+ message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 4:
+ message.debug_info = reader.bool();
+ break;
+ case 5:
+ message.expose_hidden = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.RecurrentParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "num_output":
+ message.num_output = reader.uint32();
+ break;
+ case "weight_filler":
+ message.weight_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "bias_filler":
+ message.bias_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "debug_info":
+ message.debug_info = reader.bool();
+ break;
+ case "expose_hidden":
+ message.expose_hidden = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.RecurrentParameter.prototype.num_output = 0;
+$root.caffe.RecurrentParameter.prototype.weight_filler = null;
+$root.caffe.RecurrentParameter.prototype.bias_filler = null;
+$root.caffe.RecurrentParameter.prototype.debug_info = false;
+$root.caffe.RecurrentParameter.prototype.expose_hidden = false;
+
+$root.caffe.ReductionParameter = class ReductionParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ReductionParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.operation = reader.int32();
+ break;
+ case 2:
+ message.axis = reader.int32();
+ break;
+ case 3:
+ message.coeff = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ReductionParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "operation":
+ message.operation = reader.enum($root.caffe.ReductionParameter.ReductionOp);
+ break;
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "coeff":
+ message.coeff = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ReductionParameter.prototype.operation = 1;
+$root.caffe.ReductionParameter.prototype.axis = 0;
+$root.caffe.ReductionParameter.prototype.coeff = 1;
+
+$root.caffe.ReductionParameter.ReductionOp = {
+ "SUM": 1,
+ "ASUM": 2,
+ "SUMSQ": 3,
+ "MEAN": 4
+};
+
+$root.caffe.ReLUParameter = class ReLUParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ReLUParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.negative_slope = reader.float();
+ break;
+ case 2:
+ message.engine = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ReLUParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "negative_slope":
+ message.negative_slope = reader.float();
+ break;
+ case "engine":
+ message.engine = reader.enum($root.caffe.ReLUParameter.Engine);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ReLUParameter.prototype.negative_slope = 0;
+$root.caffe.ReLUParameter.prototype.engine = 0;
+
+$root.caffe.ReLUParameter.Engine = {
+ "DEFAULT": 0,
+ "CAFFE": 1,
+ "CUDNN": 2
+};
+
+$root.caffe.ReshapeParameter = class ReshapeParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ReshapeParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.shape = $root.caffe.BlobShape.decode(reader, reader.uint32());
+ break;
+ case 2:
+ message.axis = reader.int32();
+ break;
+ case 3:
+ message.num_axes = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ReshapeParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "shape":
+ message.shape = $root.caffe.BlobShape.decodeText(reader);
+ break;
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "num_axes":
+ message.num_axes = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ReshapeParameter.prototype.shape = null;
+$root.caffe.ReshapeParameter.prototype.axis = 0;
+$root.caffe.ReshapeParameter.prototype.num_axes = -1;
+
+$root.caffe.ScaleParameter = class ScaleParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ScaleParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.axis = reader.int32();
+ break;
+ case 2:
+ message.num_axes = reader.int32();
+ break;
+ case 3:
+ message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 4:
+ message.bias_term = reader.bool();
+ break;
+ case 5:
+ message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ScaleParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "num_axes":
+ message.num_axes = reader.int32();
+ break;
+ case "filler":
+ message.filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "bias_term":
+ message.bias_term = reader.bool();
+ break;
+ case "bias_filler":
+ message.bias_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ScaleParameter.prototype.axis = 1;
+$root.caffe.ScaleParameter.prototype.num_axes = 1;
+$root.caffe.ScaleParameter.prototype.filler = null;
+$root.caffe.ScaleParameter.prototype.bias_term = false;
+$root.caffe.ScaleParameter.prototype.bias_filler = null;
+
+$root.caffe.SigmoidParameter = class SigmoidParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.SigmoidParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.engine = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.SigmoidParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "engine":
+ message.engine = reader.enum($root.caffe.SigmoidParameter.Engine);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.SigmoidParameter.prototype.engine = 0;
+
+$root.caffe.SigmoidParameter.Engine = {
+ "DEFAULT": 0,
+ "CAFFE": 1,
+ "CUDNN": 2
+};
+
+$root.caffe.SliceParameter = class SliceParameter {
+
+ constructor() {
+ this.slice_point = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.SliceParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 3:
+ message.axis = reader.int32();
+ break;
+ case 2:
+ message.slice_point = reader.array(message.slice_point, () => reader.uint32(), tag);
+ break;
+ case 1:
+ message.slice_dim = reader.uint32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.SliceParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "slice_point":
+ reader.array(message.slice_point, () => reader.uint32());
+ break;
+ case "slice_dim":
+ message.slice_dim = reader.uint32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.SliceParameter.prototype.axis = 1;
+$root.caffe.SliceParameter.prototype.slice_dim = 1;
+
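+// `slice_point` is a repeated uint32 field; `reader.array` is expected to
+// handle both packed (wire type 2) and one-value-per-tag encodings,
+// appending each decoded element to the existing array. For example, the
+// prototxt fragment
+//
+//   slice_param { axis: 1 slice_point: 2 slice_point: 4 }
+//
+// should decode to { axis: 1, slice_point: [2, 4] }.
+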
+$root.caffe.SoftmaxParameter = class SoftmaxParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.SoftmaxParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.engine = reader.int32();
+ break;
+ case 2:
+ message.axis = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.SoftmaxParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "engine":
+ message.engine = reader.enum($root.caffe.SoftmaxParameter.Engine);
+ break;
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.SoftmaxParameter.prototype.engine = 0;
+$root.caffe.SoftmaxParameter.prototype.axis = 1;
+
+$root.caffe.SoftmaxParameter.Engine = {
+ "DEFAULT": 0,
+ "CAFFE": 1,
+ "CUDNN": 2
+};
+
+$root.caffe.SwishParameter = class SwishParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.SwishParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.beta = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.SwishParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "beta":
+ message.beta = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.SwishParameter.prototype.beta = 1;
+
+$root.caffe.TanHParameter = class TanHParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.TanHParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.engine = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.TanHParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "engine":
+ message.engine = reader.enum($root.caffe.TanHParameter.Engine);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.TanHParameter.prototype.engine = 0;
+
+$root.caffe.TanHParameter.Engine = {
+ "DEFAULT": 0,
+ "CAFFE": 1,
+ "CUDNN": 2
+};
+
+$root.caffe.TileParameter = class TileParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.TileParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.axis = reader.int32();
+ break;
+ case 2:
+ message.tiles = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.TileParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "axis":
+ message.axis = reader.int32();
+ break;
+ case "tiles":
+ message.tiles = reader.int32();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.TileParameter.prototype.axis = 1;
+$root.caffe.TileParameter.prototype.tiles = 0;
+
+$root.caffe.ThresholdParameter = class ThresholdParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.ThresholdParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.threshold = reader.float();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.ThresholdParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "threshold":
+ message.threshold = reader.float();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.ThresholdParameter.prototype.threshold = 0;
+
+$root.caffe.WindowDataParameter = class WindowDataParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.WindowDataParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.source = reader.string();
+ break;
+ case 2:
+ message.scale = reader.float();
+ break;
+ case 3:
+ message.mean_file = reader.string();
+ break;
+ case 4:
+ message.batch_size = reader.uint32();
+ break;
+ case 5:
+ message.crop_size = reader.uint32();
+ break;
+ case 6:
+ message.mirror = reader.bool();
+ break;
+ case 7:
+ message.fg_threshold = reader.float();
+ break;
+ case 8:
+ message.bg_threshold = reader.float();
+ break;
+ case 9:
+ message.fg_fraction = reader.float();
+ break;
+ case 10:
+ message.context_pad = reader.uint32();
+ break;
+ case 11:
+ message.crop_mode = reader.string();
+ break;
+ case 12:
+ message.cache_images = reader.bool();
+ break;
+ case 13:
+ message.root_folder = reader.string();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.WindowDataParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "source":
+ message.source = reader.string();
+ break;
+ case "scale":
+ message.scale = reader.float();
+ break;
+ case "mean_file":
+ message.mean_file = reader.string();
+ break;
+ case "batch_size":
+ message.batch_size = reader.uint32();
+ break;
+ case "crop_size":
+ message.crop_size = reader.uint32();
+ break;
+ case "mirror":
+ message.mirror = reader.bool();
+ break;
+ case "fg_threshold":
+ message.fg_threshold = reader.float();
+ break;
+ case "bg_threshold":
+ message.bg_threshold = reader.float();
+ break;
+ case "fg_fraction":
+ message.fg_fraction = reader.float();
+ break;
+ case "context_pad":
+ message.context_pad = reader.uint32();
+ break;
+ case "crop_mode":
+ message.crop_mode = reader.string();
+ break;
+ case "cache_images":
+ message.cache_images = reader.bool();
+ break;
+ case "root_folder":
+ message.root_folder = reader.string();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.WindowDataParameter.prototype.source = "";
+$root.caffe.WindowDataParameter.prototype.scale = 1;
+$root.caffe.WindowDataParameter.prototype.mean_file = "";
+$root.caffe.WindowDataParameter.prototype.batch_size = 0;
+$root.caffe.WindowDataParameter.prototype.crop_size = 0;
+$root.caffe.WindowDataParameter.prototype.mirror = false;
+$root.caffe.WindowDataParameter.prototype.fg_threshold = 0.5;
+$root.caffe.WindowDataParameter.prototype.bg_threshold = 0.5;
+$root.caffe.WindowDataParameter.prototype.fg_fraction = 0.25;
+$root.caffe.WindowDataParameter.prototype.context_pad = 0;
+$root.caffe.WindowDataParameter.prototype.crop_mode = "warp";
+$root.caffe.WindowDataParameter.prototype.cache_images = false;
+$root.caffe.WindowDataParameter.prototype.root_folder = "";
+
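+// Field defaults live on the prototype rather than on each instance,
+// mirroring proto2 semantics: a decoded WindowDataParameter that never saw
+// field 7 on the wire still reports fg_threshold === 0.5 through the
+// prototype chain, while `Object.keys(message)` lists only the fields that
+// were actually present.
+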
+$root.caffe.SPPParameter = class SPPParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.SPPParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.pyramid_height = reader.uint32();
+ break;
+ case 2:
+ message.pool = reader.int32();
+ break;
+ case 6:
+ message.engine = reader.int32();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.SPPParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "pyramid_height":
+ message.pyramid_height = reader.uint32();
+ break;
+ case "pool":
+ message.pool = reader.enum($root.caffe.SPPParameter.PoolMethod);
+ break;
+ case "engine":
+ message.engine = reader.enum($root.caffe.SPPParameter.Engine);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.SPPParameter.prototype.pyramid_height = 0;
+$root.caffe.SPPParameter.prototype.pool = 0;
+$root.caffe.SPPParameter.prototype.engine = 0;
+
+$root.caffe.SPPParameter.PoolMethod = {
+ "MAX": 0,
+ "AVE": 1,
+ "STOCHASTIC": 2
+};
+
+$root.caffe.SPPParameter.Engine = {
+ "DEFAULT": 0,
+ "CAFFE": 1,
+ "CUDNN": 2
+};
+
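+// In the prototxt text format enums appear by name (e.g. `pool: MAX`), so
+// `decodeText` resolves them through maps like the ones above via
+// `reader.enum`, while the binary `decode` path reads the raw varint with
+// `reader.int32()`.
+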
+$root.caffe.V1LayerParameter = class V1LayerParameter {
+
+ constructor() {
+ this.bottom = [];
+ this.top = [];
+ this.include = [];
+ this.exclude = [];
+ this.blobs = [];
+ this.param = [];
+ this.blob_share_mode = [];
+ this.blobs_lr = [];
+ this.weight_decay = [];
+ this.loss_weight = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.V1LayerParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 2:
+ message.bottom.push(reader.string());
+ break;
+ case 3:
+ message.top.push(reader.string());
+ break;
+ case 4:
+ message.name = reader.string();
+ break;
+ case 32:
+ message.include.push($root.caffe.NetStateRule.decode(reader, reader.uint32()));
+ break;
+ case 33:
+ message.exclude.push($root.caffe.NetStateRule.decode(reader, reader.uint32()));
+ break;
+ case 5:
+ message.type = reader.int32();
+ break;
+ case 6:
+ message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32()));
+ break;
+ case 1001:
+ message.param.push(reader.string());
+ break;
+ case 1002:
+ message.blob_share_mode = reader.array(message.blob_share_mode, () => reader.int32(), tag);
+ break;
+ case 7:
+ message.blobs_lr = reader.floats(message.blobs_lr, tag);
+ break;
+ case 8:
+ message.weight_decay = reader.floats(message.weight_decay, tag);
+ break;
+ case 35:
+ message.loss_weight = reader.floats(message.loss_weight, tag);
+ break;
+ case 27:
+ message.accuracy_param = $root.caffe.AccuracyParameter.decode(reader, reader.uint32());
+ break;
+ case 23:
+ message.argmax_param = $root.caffe.ArgMaxParameter.decode(reader, reader.uint32());
+ break;
+ case 9:
+ message.concat_param = $root.caffe.ConcatParameter.decode(reader, reader.uint32());
+ break;
+ case 40:
+ message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decode(reader, reader.uint32());
+ break;
+ case 10:
+ message.convolution_param = $root.caffe.ConvolutionParameter.decode(reader, reader.uint32());
+ break;
+ case 11:
+ message.data_param = $root.caffe.DataParameter.decode(reader, reader.uint32());
+ break;
+ case 12:
+ message.dropout_param = $root.caffe.DropoutParameter.decode(reader, reader.uint32());
+ break;
+ case 26:
+ message.dummy_data_param = $root.caffe.DummyDataParameter.decode(reader, reader.uint32());
+ break;
+ case 24:
+ message.eltwise_param = $root.caffe.EltwiseParameter.decode(reader, reader.uint32());
+ break;
+ case 41:
+ message.exp_param = $root.caffe.ExpParameter.decode(reader, reader.uint32());
+ break;
+ case 13:
+ message.hdf5_data_param = $root.caffe.HDF5DataParameter.decode(reader, reader.uint32());
+ break;
+ case 14:
+ message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32());
+ break;
+ case 29:
+ message.hinge_loss_param = $root.caffe.HingeLossParameter.decode(reader, reader.uint32());
+ break;
+ case 15:
+ message.image_data_param = $root.caffe.ImageDataParameter.decode(reader, reader.uint32());
+ break;
+ case 16:
+ message.infogain_loss_param = $root.caffe.InfogainLossParameter.decode(reader, reader.uint32());
+ break;
+ case 17:
+ message.inner_product_param = $root.caffe.InnerProductParameter.decode(reader, reader.uint32());
+ break;
+ case 18:
+ message.lrn_param = $root.caffe.LRNParameter.decode(reader, reader.uint32());
+ break;
+ case 22:
+ message.memory_data_param = $root.caffe.MemoryDataParameter.decode(reader, reader.uint32());
+ break;
+ case 34:
+ message.mvn_param = $root.caffe.MVNParameter.decode(reader, reader.uint32());
+ break;
+ case 19:
+ message.pooling_param = $root.caffe.PoolingParameter.decode(reader, reader.uint32());
+ break;
+ case 21:
+ message.power_param = $root.caffe.PowerParameter.decode(reader, reader.uint32());
+ break;
+ case 30:
+ message.relu_param = $root.caffe.ReLUParameter.decode(reader, reader.uint32());
+ break;
+ case 38:
+ message.sigmoid_param = $root.caffe.SigmoidParameter.decode(reader, reader.uint32());
+ break;
+ case 39:
+ message.softmax_param = $root.caffe.SoftmaxParameter.decode(reader, reader.uint32());
+ break;
+ case 31:
+ message.slice_param = $root.caffe.SliceParameter.decode(reader, reader.uint32());
+ break;
+ case 37:
+ message.tanh_param = $root.caffe.TanHParameter.decode(reader, reader.uint32());
+ break;
+ case 25:
+ message.threshold_param = $root.caffe.ThresholdParameter.decode(reader, reader.uint32());
+ break;
+ case 20:
+ message.window_data_param = $root.caffe.WindowDataParameter.decode(reader, reader.uint32());
+ break;
+ case 36:
+ message.transform_param = $root.caffe.TransformationParameter.decode(reader, reader.uint32());
+ break;
+ case 42:
+ message.loss_param = $root.caffe.LossParameter.decode(reader, reader.uint32());
+ break;
+ case 1:
+ message.layer = $root.caffe.V0LayerParameter.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.V1LayerParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "bottom":
+ reader.array(message.bottom, () => reader.string());
+ break;
+ case "top":
+ reader.array(message.top, () => reader.string());
+ break;
+ case "name":
+ message.name = reader.string();
+ break;
+ case "include":
+ message.include.push($root.caffe.NetStateRule.decodeText(reader));
+ break;
+ case "exclude":
+ message.exclude.push($root.caffe.NetStateRule.decodeText(reader));
+ break;
+ case "type":
+ message.type = reader.enum($root.caffe.V1LayerParameter.LayerType);
+ break;
+ case "blobs":
+ message.blobs.push($root.caffe.BlobProto.decodeText(reader));
+ break;
+ case "param":
+ reader.array(message.param, () => reader.string());
+ break;
+ case "blob_share_mode":
+ reader.array(message.blob_share_mode, () => reader.enum($root.caffe.V1LayerParameter.DimCheckMode));
+ break;
+ case "blobs_lr":
+ reader.array(message.blobs_lr, () => reader.float());
+ break;
+ case "weight_decay":
+ reader.array(message.weight_decay, () => reader.float());
+ break;
+ case "loss_weight":
+ reader.array(message.loss_weight, () => reader.float());
+ break;
+ case "accuracy_param":
+ message.accuracy_param = $root.caffe.AccuracyParameter.decodeText(reader);
+ break;
+ case "argmax_param":
+ message.argmax_param = $root.caffe.ArgMaxParameter.decodeText(reader);
+ break;
+ case "concat_param":
+ message.concat_param = $root.caffe.ConcatParameter.decodeText(reader);
+ break;
+ case "contrastive_loss_param":
+ message.contrastive_loss_param = $root.caffe.ContrastiveLossParameter.decodeText(reader);
+ break;
+ case "convolution_param":
+ message.convolution_param = $root.caffe.ConvolutionParameter.decodeText(reader);
+ break;
+ case "data_param":
+ message.data_param = $root.caffe.DataParameter.decodeText(reader);
+ break;
+ case "dropout_param":
+ message.dropout_param = $root.caffe.DropoutParameter.decodeText(reader);
+ break;
+ case "dummy_data_param":
+ message.dummy_data_param = $root.caffe.DummyDataParameter.decodeText(reader);
+ break;
+ case "eltwise_param":
+ message.eltwise_param = $root.caffe.EltwiseParameter.decodeText(reader);
+ break;
+ case "exp_param":
+ message.exp_param = $root.caffe.ExpParameter.decodeText(reader);
+ break;
+ case "hdf5_data_param":
+ message.hdf5_data_param = $root.caffe.HDF5DataParameter.decodeText(reader);
+ break;
+ case "hdf5_output_param":
+ message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader);
+ break;
+ case "hinge_loss_param":
+ message.hinge_loss_param = $root.caffe.HingeLossParameter.decodeText(reader);
+ break;
+ case "image_data_param":
+ message.image_data_param = $root.caffe.ImageDataParameter.decodeText(reader);
+ break;
+ case "infogain_loss_param":
+ message.infogain_loss_param = $root.caffe.InfogainLossParameter.decodeText(reader);
+ break;
+ case "inner_product_param":
+ message.inner_product_param = $root.caffe.InnerProductParameter.decodeText(reader);
+ break;
+ case "lrn_param":
+ message.lrn_param = $root.caffe.LRNParameter.decodeText(reader);
+ break;
+ case "memory_data_param":
+ message.memory_data_param = $root.caffe.MemoryDataParameter.decodeText(reader);
+ break;
+ case "mvn_param":
+ message.mvn_param = $root.caffe.MVNParameter.decodeText(reader);
+ break;
+ case "pooling_param":
+ message.pooling_param = $root.caffe.PoolingParameter.decodeText(reader);
+ break;
+ case "power_param":
+ message.power_param = $root.caffe.PowerParameter.decodeText(reader);
+ break;
+ case "relu_param":
+ message.relu_param = $root.caffe.ReLUParameter.decodeText(reader);
+ break;
+ case "sigmoid_param":
+ message.sigmoid_param = $root.caffe.SigmoidParameter.decodeText(reader);
+ break;
+ case "softmax_param":
+ message.softmax_param = $root.caffe.SoftmaxParameter.decodeText(reader);
+ break;
+ case "slice_param":
+ message.slice_param = $root.caffe.SliceParameter.decodeText(reader);
+ break;
+ case "tanh_param":
+ message.tanh_param = $root.caffe.TanHParameter.decodeText(reader);
+ break;
+ case "threshold_param":
+ message.threshold_param = $root.caffe.ThresholdParameter.decodeText(reader);
+ break;
+ case "window_data_param":
+ message.window_data_param = $root.caffe.WindowDataParameter.decodeText(reader);
+ break;
+ case "transform_param":
+ message.transform_param = $root.caffe.TransformationParameter.decodeText(reader);
+ break;
+ case "loss_param":
+ message.loss_param = $root.caffe.LossParameter.decodeText(reader);
+ break;
+ case "layer":
+ message.layer = $root.caffe.V0LayerParameter.decodeText(reader);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.V1LayerParameter.prototype.name = "";
+$root.caffe.V1LayerParameter.prototype.type = 0;
+$root.caffe.V1LayerParameter.prototype.accuracy_param = null;
+$root.caffe.V1LayerParameter.prototype.argmax_param = null;
+$root.caffe.V1LayerParameter.prototype.concat_param = null;
+$root.caffe.V1LayerParameter.prototype.contrastive_loss_param = null;
+$root.caffe.V1LayerParameter.prototype.convolution_param = null;
+$root.caffe.V1LayerParameter.prototype.data_param = null;
+$root.caffe.V1LayerParameter.prototype.dropout_param = null;
+$root.caffe.V1LayerParameter.prototype.dummy_data_param = null;
+$root.caffe.V1LayerParameter.prototype.eltwise_param = null;
+$root.caffe.V1LayerParameter.prototype.exp_param = null;
+$root.caffe.V1LayerParameter.prototype.hdf5_data_param = null;
+$root.caffe.V1LayerParameter.prototype.hdf5_output_param = null;
+$root.caffe.V1LayerParameter.prototype.hinge_loss_param = null;
+$root.caffe.V1LayerParameter.prototype.image_data_param = null;
+$root.caffe.V1LayerParameter.prototype.infogain_loss_param = null;
+$root.caffe.V1LayerParameter.prototype.inner_product_param = null;
+$root.caffe.V1LayerParameter.prototype.lrn_param = null;
+$root.caffe.V1LayerParameter.prototype.memory_data_param = null;
+$root.caffe.V1LayerParameter.prototype.mvn_param = null;
+$root.caffe.V1LayerParameter.prototype.pooling_param = null;
+$root.caffe.V1LayerParameter.prototype.power_param = null;
+$root.caffe.V1LayerParameter.prototype.relu_param = null;
+$root.caffe.V1LayerParameter.prototype.sigmoid_param = null;
+$root.caffe.V1LayerParameter.prototype.softmax_param = null;
+$root.caffe.V1LayerParameter.prototype.slice_param = null;
+$root.caffe.V1LayerParameter.prototype.tanh_param = null;
+$root.caffe.V1LayerParameter.prototype.threshold_param = null;
+$root.caffe.V1LayerParameter.prototype.window_data_param = null;
+$root.caffe.V1LayerParameter.prototype.transform_param = null;
+$root.caffe.V1LayerParameter.prototype.loss_param = null;
+$root.caffe.V1LayerParameter.prototype.layer = null;
+
+$root.caffe.V1LayerParameter.LayerType = {
+ "NONE": 0,
+ "ABSVAL": 35,
+ "ACCURACY": 1,
+ "ARGMAX": 30,
+ "BNLL": 2,
+ "CONCAT": 3,
+ "CONTRASTIVE_LOSS": 37,
+ "CONVOLUTION": 4,
+ "DATA": 5,
+ "DECONVOLUTION": 39,
+ "DROPOUT": 6,
+ "DUMMY_DATA": 32,
+ "EUCLIDEAN_LOSS": 7,
+ "ELTWISE": 25,
+ "EXP": 38,
+ "FLATTEN": 8,
+ "HDF5_DATA": 9,
+ "HDF5_OUTPUT": 10,
+ "HINGE_LOSS": 28,
+ "IM2COL": 11,
+ "IMAGE_DATA": 12,
+ "INFOGAIN_LOSS": 13,
+ "INNER_PRODUCT": 14,
+ "LRN": 15,
+ "MEMORY_DATA": 29,
+ "MULTINOMIAL_LOGISTIC_LOSS": 16,
+ "MVN": 34,
+ "POOLING": 17,
+ "POWER": 26,
+ "RELU": 18,
+ "SIGMOID": 19,
+ "SIGMOID_CROSS_ENTROPY_LOSS": 27,
+ "SILENCE": 36,
+ "SOFTMAX": 20,
+ "SOFTMAX_LOSS": 21,
+ "SPLIT": 22,
+ "SLICE": 33,
+ "TANH": 23,
+ "WINDOW_DATA": 24,
+ "THRESHOLD": 31
+};
+
+$root.caffe.V1LayerParameter.DimCheckMode = {
+ "STRICT": 0,
+ "PERMISSIVE": 1
+};
+
+$root.caffe.V0LayerParameter = class V0LayerParameter {
+
+ constructor() {
+ this.blobs = [];
+ this.blobs_lr = [];
+ this.weight_decay = [];
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.V0LayerParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.name = reader.string();
+ break;
+ case 2:
+ message.type = reader.string();
+ break;
+ case 3:
+ message.num_output = reader.uint32();
+ break;
+ case 4:
+ message.biasterm = reader.bool();
+ break;
+ case 5:
+ message.weight_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 6:
+ message.bias_filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 7:
+ message.pad = reader.uint32();
+ break;
+ case 8:
+ message.kernelsize = reader.uint32();
+ break;
+ case 9:
+ message.group = reader.uint32();
+ break;
+ case 10:
+ message.stride = reader.uint32();
+ break;
+ case 11:
+ message.pool = reader.int32();
+ break;
+ case 12:
+ message.dropout_ratio = reader.float();
+ break;
+ case 13:
+ message.local_size = reader.uint32();
+ break;
+ case 14:
+ message.alpha = reader.float();
+ break;
+ case 15:
+ message.beta = reader.float();
+ break;
+ case 22:
+ message.k = reader.float();
+ break;
+ case 16:
+ message.source = reader.string();
+ break;
+ case 17:
+ message.scale = reader.float();
+ break;
+ case 18:
+ message.meanfile = reader.string();
+ break;
+ case 19:
+ message.batchsize = reader.uint32();
+ break;
+ case 20:
+ message.cropsize = reader.uint32();
+ break;
+ case 21:
+ message.mirror = reader.bool();
+ break;
+ case 50:
+ message.blobs.push($root.caffe.BlobProto.decode(reader, reader.uint32()));
+ break;
+ case 51:
+ message.blobs_lr = reader.floats(message.blobs_lr, tag);
+ break;
+ case 52:
+ message.weight_decay = reader.floats(message.weight_decay, tag);
+ break;
+ case 53:
+ message.rand_skip = reader.uint32();
+ break;
+ case 54:
+ message.det_fg_threshold = reader.float();
+ break;
+ case 55:
+ message.det_bg_threshold = reader.float();
+ break;
+ case 56:
+ message.det_fg_fraction = reader.float();
+ break;
+ case 58:
+ message.det_context_pad = reader.uint32();
+ break;
+ case 59:
+ message.det_crop_mode = reader.string();
+ break;
+ case 60:
+ message.new_num = reader.int32();
+ break;
+ case 61:
+ message.new_channels = reader.int32();
+ break;
+ case 62:
+ message.new_height = reader.int32();
+ break;
+ case 63:
+ message.new_width = reader.int32();
+ break;
+ case 64:
+ message.shuffle_images = reader.bool();
+ break;
+ case 65:
+ message.concat_dim = reader.uint32();
+ break;
+ case 1001:
+ message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decode(reader, reader.uint32());
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.V0LayerParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "name":
+ message.name = reader.string();
+ break;
+ case "type":
+ message.type = reader.string();
+ break;
+ case "num_output":
+ message.num_output = reader.uint32();
+ break;
+ case "biasterm":
+ message.biasterm = reader.bool();
+ break;
+ case "weight_filler":
+ message.weight_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "bias_filler":
+ message.bias_filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "pad":
+ message.pad = reader.uint32();
+ break;
+ case "kernelsize":
+ message.kernelsize = reader.uint32();
+ break;
+ case "group":
+ message.group = reader.uint32();
+ break;
+ case "stride":
+ message.stride = reader.uint32();
+ break;
+ case "pool":
+ message.pool = reader.enum($root.caffe.V0LayerParameter.PoolMethod);
+ break;
+ case "dropout_ratio":
+ message.dropout_ratio = reader.float();
+ break;
+ case "local_size":
+ message.local_size = reader.uint32();
+ break;
+ case "alpha":
+ message.alpha = reader.float();
+ break;
+ case "beta":
+ message.beta = reader.float();
+ break;
+ case "k":
+ message.k = reader.float();
+ break;
+ case "source":
+ message.source = reader.string();
+ break;
+ case "scale":
+ message.scale = reader.float();
+ break;
+ case "meanfile":
+ message.meanfile = reader.string();
+ break;
+ case "batchsize":
+ message.batchsize = reader.uint32();
+ break;
+ case "cropsize":
+ message.cropsize = reader.uint32();
+ break;
+ case "mirror":
+ message.mirror = reader.bool();
+ break;
+ case "blobs":
+ message.blobs.push($root.caffe.BlobProto.decodeText(reader));
+ break;
+ case "blobs_lr":
+ reader.array(message.blobs_lr, () => reader.float());
+ break;
+ case "weight_decay":
+ reader.array(message.weight_decay, () => reader.float());
+ break;
+ case "rand_skip":
+ message.rand_skip = reader.uint32();
+ break;
+ case "det_fg_threshold":
+ message.det_fg_threshold = reader.float();
+ break;
+ case "det_bg_threshold":
+ message.det_bg_threshold = reader.float();
+ break;
+ case "det_fg_fraction":
+ message.det_fg_fraction = reader.float();
+ break;
+ case "det_context_pad":
+ message.det_context_pad = reader.uint32();
+ break;
+ case "det_crop_mode":
+ message.det_crop_mode = reader.string();
+ break;
+ case "new_num":
+ message.new_num = reader.int32();
+ break;
+ case "new_channels":
+ message.new_channels = reader.int32();
+ break;
+ case "new_height":
+ message.new_height = reader.int32();
+ break;
+ case "new_width":
+ message.new_width = reader.int32();
+ break;
+ case "shuffle_images":
+ message.shuffle_images = reader.bool();
+ break;
+ case "concat_dim":
+ message.concat_dim = reader.uint32();
+ break;
+ case "hdf5_output_param":
+ message.hdf5_output_param = $root.caffe.HDF5OutputParameter.decodeText(reader);
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.V0LayerParameter.prototype.name = "";
+$root.caffe.V0LayerParameter.prototype.type = "";
+$root.caffe.V0LayerParameter.prototype.num_output = 0;
+$root.caffe.V0LayerParameter.prototype.biasterm = true;
+$root.caffe.V0LayerParameter.prototype.weight_filler = null;
+$root.caffe.V0LayerParameter.prototype.bias_filler = null;
+$root.caffe.V0LayerParameter.prototype.pad = 0;
+$root.caffe.V0LayerParameter.prototype.kernelsize = 0;
+$root.caffe.V0LayerParameter.prototype.group = 1;
+$root.caffe.V0LayerParameter.prototype.stride = 1;
+$root.caffe.V0LayerParameter.prototype.pool = 0;
+$root.caffe.V0LayerParameter.prototype.dropout_ratio = 0.5;
+$root.caffe.V0LayerParameter.prototype.local_size = 5;
+$root.caffe.V0LayerParameter.prototype.alpha = 1;
+$root.caffe.V0LayerParameter.prototype.beta = 0.75;
+$root.caffe.V0LayerParameter.prototype.k = 1;
+$root.caffe.V0LayerParameter.prototype.source = "";
+$root.caffe.V0LayerParameter.prototype.scale = 1;
+$root.caffe.V0LayerParameter.prototype.meanfile = "";
+$root.caffe.V0LayerParameter.prototype.batchsize = 0;
+$root.caffe.V0LayerParameter.prototype.cropsize = 0;
+$root.caffe.V0LayerParameter.prototype.mirror = false;
+$root.caffe.V0LayerParameter.prototype.rand_skip = 0;
+$root.caffe.V0LayerParameter.prototype.det_fg_threshold = 0.5;
+$root.caffe.V0LayerParameter.prototype.det_bg_threshold = 0.5;
+$root.caffe.V0LayerParameter.prototype.det_fg_fraction = 0.25;
+$root.caffe.V0LayerParameter.prototype.det_context_pad = 0;
+$root.caffe.V0LayerParameter.prototype.det_crop_mode = "warp";
+$root.caffe.V0LayerParameter.prototype.new_num = 0;
+$root.caffe.V0LayerParameter.prototype.new_channels = 0;
+$root.caffe.V0LayerParameter.prototype.new_height = 0;
+$root.caffe.V0LayerParameter.prototype.new_width = 0;
+$root.caffe.V0LayerParameter.prototype.shuffle_images = false;
+$root.caffe.V0LayerParameter.prototype.concat_dim = 1;
+$root.caffe.V0LayerParameter.prototype.hdf5_output_param = null;
+
+$root.caffe.V0LayerParameter.PoolMethod = {
+ "MAX": 0,
+ "AVE": 1,
+ "STOCHASTIC": 2
+};
+
+$root.caffe.PReLUParameter = class PReLUParameter {
+
+ constructor() {
+ }
+
+ static decode(reader, length) {
+ const message = new $root.caffe.PReLUParameter();
+ const end = length !== undefined ? reader.position + length : reader.length;
+ while (reader.position < end) {
+ const tag = reader.uint32();
+ switch (tag >>> 3) {
+ case 1:
+ message.filler = $root.caffe.FillerParameter.decode(reader, reader.uint32());
+ break;
+ case 2:
+ message.channel_shared = reader.bool();
+ break;
+ default:
+ reader.skipType(tag & 7);
+ break;
+ }
+ }
+ return message;
+ }
+
+ static decodeText(reader) {
+ const message = new $root.caffe.PReLUParameter();
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ switch (tag) {
+ case "filler":
+ message.filler = $root.caffe.FillerParameter.decodeText(reader);
+ break;
+ case "channel_shared":
+ message.channel_shared = reader.bool();
+ break;
+ default:
+ reader.field(tag, message);
+ break;
+ }
+ }
+ return message;
+ }
+};
+
+$root.caffe.PReLUParameter.prototype.filler = null;
+$root.caffe.PReLUParameter.prototype.channel_shared = false;
diff --git a/caffe.js b/caffe.js
new file mode 100644
index 00000000000..cc308d15cf3
--- /dev/null
+++ b/caffe.js
@@ -0,0 +1,719 @@
+
+import * as protobuf from './protobuf.js';
+
+const caffe = {};
+
+caffe.ModelFactory = class {
+
+ match(context) {
+ const identifier = context.identifier;
+ const extension = identifier.split('.').pop().toLowerCase();
+ if (extension == 'caffemodel') {
+ return 'caffe.pb';
+ }
+ if (identifier == 'saved_model.pbtxt' || identifier == 'saved_model.prototxt' ||
+ identifier.endsWith('predict_net.pbtxt') || identifier.endsWith('predict_net.prototxt') ||
+ identifier.endsWith('init_net.pbtxt') || identifier.endsWith('init_net.prototxt')) {
+ return undefined;
+ }
+ const tags = context.tags('pbtxt');
+ if (tags.has('layer') || tags.has('layers')) {
+ return 'caffe.pbtxt';
+ }
+ if (tags.has('net') || tags.has('train_net') || tags.has('net_param')) {
+ return 'caffe.pbtxt.solver';
+ }
+ return undefined;
+ }
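+
+ // A rough sketch of what `match` returns for typical inputs: a
+ // '.caffemodel' file maps to 'caffe.pb' (binary NetParameter), a prototxt
+ // with `layer`/`layers` tags maps to 'caffe.pbtxt', and a solver prototxt
+ // with `net`/`train_net` maps to 'caffe.pbtxt.solver'. Files that belong
+ // to other frameworks (TensorFlow saved models, Caffe2 predict/init nets)
+ // are rejected early so their own factories can claim them.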
+
+ async open(context, target) {
+ await context.require('./caffe-proto');
+ caffe.proto = protobuf.get('caffe').caffe;
+ const openModel = async (context, netParameter) => {
+ const metadata = await context.metadata('caffe-metadata.json');
+ return new caffe.Model(metadata, netParameter);
+ };
+ const openNetParameterText = (context, identifier, buffer) => {
+ let netParameter = null;
+ try {
+ const reader = protobuf.TextReader.open(buffer);
+ reader.field = function(tag, message) {
+ const type = message.constructor.name;
+ if (tag.endsWith('_param') && (type == 'LayerParameter' || type == 'V1LayerParameter' || type == 'V0LayerParameter')) {
+ message[tag] = caffe.ModelFactory._decodeText(reader);
+ return;
+ } else if (message.constructor.name.endsWith('Parameter') || message.constructor.name === 'ParamSpec') {
+ if (message[tag]) {
+ if (!Array.isArray(message[tag])) {
+ message[tag] = [ message[tag] ];
+ }
+ message[tag].push(this.read());
+ } else {
+ message[tag] = this.read();
+ }
+ return;
+ }
+ throw new Error(`Unknown field '${tag}' ${this.location()}`);
+ };
+ reader.enum = function(type) {
+ const token = this.token();
+ this.next();
+ this.semicolon();
+ if (!Object.prototype.hasOwnProperty.call(type, token)) {
+ const value = Number.parseInt(token, 10);
+ if (!Number.isNaN(token - value)) {
+ return value;
+ }
+ return token;
+ }
+ return type[token];
+ };
+ if (/MobileNetSSD_train_template.prototxt/.exec(identifier)) {
+ reader.integer = function() {
+ const token = this.token();
+ const value = Number.parseInt(token, 10);
+ this.next();
+ this.semicolon();
+ if (Number.isNaN(token - value)) {
+ return token;
+ }
+ return value;
+ };
+ }
+ netParameter = caffe.proto.NetParameter.decodeText(reader);
+ } catch (error) {
+ const message = error && error.message ? error.message : error.toString();
+ throw new caffe.Error(`File text format is not caffe.NetParameter (${message.replace(/\.$/, '')}).`);
+ }
+ return openModel(context, netParameter);
+ };
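+ // The `reader.field` override above is what lets loosely written prototxt
+ // files through: unknown `*_param` blocks on layer messages are captured
+ // as plain objects via `_decodeText`, and unrecognized fields on other
+ // `*Parameter` messages are read generically, with repeated occurrences
+ // collapsed into an array, instead of aborting the parse.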
+ switch (target) {
+ case 'caffe.pbtxt.solver': {
+ const stream = context.stream;
+ const reader = protobuf.TextReader.open(stream);
+ reader.field = function(tag, message) {
+ if (message instanceof caffe.proto.SolverParameter) {
+ message[tag] = this.read();
+ return;
+ }
+ throw new Error(`Unknown field '${tag}'${this.location()}`);
+ };
+ const solver = caffe.proto.SolverParameter.decodeText(reader);
+ if (solver.net_param) {
+ return openModel(context, solver.net_param);
+ }
+ let name = solver.net || solver.train_net;
+ name = name.split('/').pop();
+ try {
+ const content = await context.fetch(name);
+ const buffer = content.stream.peek();
+ return openNetParameterText(context, name, buffer);
+ } catch (error) {
+ const message = error.message ? error.message : error.toString();
+ throw new caffe.Error(`Failed to load '${name}' (${message.replace(/\.$/, '')}).`);
+ }
+ }
+ case 'caffe.pbtxt': {
+ return openNetParameterText(context, context.identifier, context.stream.peek());
+ }
+ case 'caffe.pb': {
+ let netParameter = null;
+ try {
+ const stream = context.stream;
+ const reader = protobuf.BinaryReader.open(stream);
+ netParameter = caffe.proto.NetParameter.decode(reader);
+ } catch (error) {
+ const message = error && error.message ? error.message : error.toString();
+ throw new caffe.Error(`File format is not caffe.NetParameter (${message.replace(/\.$/, '')}).`);
+ }
+ return openModel(context, netParameter);
+ }
+ default: {
+ throw new caffe.Error(`Unsupported Caffe format '${target}'.`);
+ }
+ }
+ }
+
+ static _decodeText(reader) {
+ const message = {};
+ reader.start();
+ while (!reader.end()) {
+ const tag = reader.tag();
+ const value = reader.read();
+ if (!message[tag]) {
+ message[tag] = value;
+ } else {
+ if (!Array.isArray(message[tag])) {
+ message[tag] = [ message[tag] ];
+ }
+ message[tag].push(value);
+ }
+ }
+ return message;
+ }
+};
+
+caffe.Model = class {
+
+ constructor(metadata, net) {
+
+ this._name = net.name;
+
+ if (net.layers && net.layers.length > 0) {
+ if (net.layers.every((layer) => Object.prototype.hasOwnProperty.call(layer, 'layer'))) {
+ this._version = 0;
+ net.layer = net.layers;
+ } else {
+ this._version = 1;
+ net.layer = net.layers;
+ }
+ } else if (net.layer && net.layer.length > 0) {
+ this._version = 2;
+ }
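+ // Version detection, roughly: V0 prototxt nests each layer under
+ // `layers { layer { ... } }`, V1 uses `layers { ... }` with enum types,
+ // and current files use `layer { ... }` with string types. All three are
+ // normalized onto `net.layer` so the code below has a single path.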
+
+ const phases = new Set();
+ for (const layer of net.layer) {
+ for (const include of layer.include) {
+ if (include.phase !== undefined) {
+ phases.add(include.phase);
+ }
+ }
+ }
+ if (phases.size === 0) {
+ phases.add(-1);
+ }
+
+ this._graphs = [];
+ for (const phase of phases) {
+ const graph = new caffe.Graph(metadata, phase, net, this._version);
+ this._graphs.push(graph);
+ }
+ }
+
+ get format() {
+ return `Caffe${this._version ? ` v${this._version}` : ''}`;
+ }
+
+ get graphs() {
+ return this._graphs;
+ }
+};
+
+caffe.Graph = class {
+
+ constructor(metadata, phase, net, version) {
+ switch (phase) {
+ case 0: this._phase = 'TRAIN'; break;
+ case 1: this._phase = 'TEST'; break;
+ case -1: this._phase = ''; break;
+ default: this._phase = phase.toString(); break;
+ }
+ this._nodes = [];
+ this._inputs = [];
+ this._outputs = [];
+ for (const layer of net.layer) {
+ layer.input = layer.bottom.slice(0);
+ layer.output = layer.top.slice(0);
+ layer.chain = [];
+ }
+ const layers = [];
+ for (const layer of net.layer) {
+ if (phase === -1 || layer.include.every((include) => include.phase === phase)) {
+ layers.push(layer);
+ }
+ }
+ const scopes = new Map();
+ let index = 0;
+ for (const layer of layers) {
+ layer.input = layer.input.map((input) => scopes.has(input) ? scopes.get(input) : input);
+ layer.output = layer.output.map((output) => {
+ const value = scopes.has(output) ? `${output}\n${index}` : output;
+ scopes.set(output, value);
+ return value;
+ });
+ index++;
+ }
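+ // In-place layers reuse blob names, so outputs are renamed to stay
+ // unique: when a top name has already been written, the new value gets
+ // the writing layer's index appended after a newline (e.g. "conv1" ->
+ // "conv1\n3"), and later readers of that name are rewired to the most
+ // recent writer.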
+ // Graph Inputs
+ const usedOutputs = new Set();
+ for (const layer of layers) {
+ for (const output of layer.output) {
+ usedOutputs.add(output);
+ }
+ }
+ const unusedInputs = [];
+ for (const layer of layers) {
+ for (const input of layer.input) {
+ if (!usedOutputs.has(input)) {
+ unusedInputs.push(input);
+ }
+ }
+ }
+ const values = new Map();
+ const value = (name, type) => {
+ if (!values.has(name)) {
+ values.set(name, new caffe.Value(name, type));
+ } else if (type) {
+ throw new caffe.Error(`Duplicate value '${name}'.`);
+ }
+ return values.get(name);
+ };
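+ // `value` memoizes caffe.Value instances by name so every layer that
+ // reads or writes a blob shares the same object; supplying a type for a
+ // name that already exists is treated as a duplicate definition.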
+ const nodes = [];
+ let lastLayer = null;
+ let lastTop = null;
+ while (layers.length > 0) {
+ let layer = layers.shift();
+ if (layer.output.length == 1 && layer.input.length == 1 &&
+ layer.output[0].split('\n').shift() == layer.input[0].split('\n').shift() &&
+ lastLayer &&
+ lastTop == layer.output[0].split('\n').shift()) {
+ lastLayer.chain = lastLayer.chain || [];
+ lastLayer.chain.push(layer);
+ } else {
+ if (layer.type == 'Input' || layer.type == 'Data') {
+ if (layer.input.length == 0 && layer.output.length == 1 &&
+ layer.input_param && layer.input_param.shape &&
+ layer.input_param.shape.length == 1 && layer.input_param.shape[0].dim) {
+ const shape = new caffe.TensorShape(layer.input_param.shape[0].dim.map((dim) => dim.toNumber()));
+ const type = new caffe.TensorType(null, shape);
+ this._inputs.push(new caffe.Argument(layer.output[0], [ value(layer.output[0], type) ]));
+ layer = null;
+ }
+ }
+ if (layer) {
+ nodes.push(layer);
+ lastLayer = null;
+ lastTop = null;
+ if (layer.output.length == 1) {
+ lastLayer = layer;
+ lastTop = layer.output[0].split('\n').shift();
+ }
+ }
+ }
+ }
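+ // The loop above folds in-place activations into their producer: a layer
+ // whose single input and single output name the same blob (e.g. a ReLU
+ // applied in place after a Convolution) is attached to the previous
+ // layer's `chain` instead of becoming a standalone node, which keeps the
+ // graph close to how the prototxt reads.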
+ if (net.input) {
+ for (let i = 0; i < net.input.length; i++) {
+ const input = net.input[i];
+ if (this._inputs.some((item) => item.name === input)) {
+ continue;
+ }
+ let inputType = null;
+ if (net.input_shape && i < net.input_shape.length) {
+ const blobShape = net.input_shape[i];
+ if (blobShape && blobShape.dim) {
+ const shape = new caffe.TensorShape(blobShape.dim.map((dim) => dim.toNumber()));
+ inputType = new caffe.TensorType(null, shape);
+ }
+ }
+ const dim = i * 4;
+ if (!inputType && net.input_dim && net.input_dim.length >= dim) {
+ const shape = new caffe.TensorShape(net.input_dim.slice(dim, dim + 4));
+ inputType = new caffe.TensorType(null, shape);
+ }
+ this._inputs.push(new caffe.Argument(input, [ value(input, inputType, null) ]));
+ }
+ }
+
+ for (const layer of nodes) {
+ const node = new caffe.Node(metadata, layer, version, value);
+ if (layer.chain && layer.chain.length > 0) {
+ for (const chain of layer.chain) {
+ node.chain.push(new caffe.Node(metadata, chain, version, value));
+ }
+ }
+ this._nodes.push(node);
+ }
+
+ if (this._inputs.length === 0 && unusedInputs.length === 1) {
+ this._inputs.push(new caffe.Argument(unusedInputs[0], [ value(unusedInputs[0], null) ]));
+ }
+ }
+
+ get name() {
+ return this._phase;
+ }
+
+ get type() {
+ return '';
+ }
+
+ get inputs() {
+ return this._inputs;
+ }
+
+ get outputs() {
+ return this._outputs;
+ }
+
+ get nodes() {
+ return this._nodes;
+ }
+};
+
+caffe.Argument = class {
+
+ constructor(name, value) {
+ this._name = name;
+ this._value = value;
+ }
+
+ get name() {
+ return this._name;
+ }
+
+ get value() {
+ return this._value;
+ }
+};
+
+caffe.Value = class {
+
+ constructor(name, type, initializer) {
+ if (typeof name !== 'string') {
+ throw new caffe.Error(`Invalid value identifier '${JSON.stringify(name)}'.`);
+ }
+ this._name = name;
+ this._type = type || null;
+ this._initializer = initializer || null;
+ }
+
+ get name() {
+ return this._name;
+ }
+
+ get type() {
+ return this._type;
+ }
+
+ get initializer() {
+ return this._initializer;
+ }
+};
+
+caffe.Node = class {
+
+ constructor(metadata, layer, version, value) {
+ this._chain = [];
+ this._attributes = [];
+ let type;
+ switch (version) {
+ case 0: {
+ this._name = layer.layer.name;
+ type = layer.layer.type;
+ break;
+ }
+ case 1: {
+ this._name = layer.name;
+ type = caffe.Utility.layerType(layer.type);
+ break;
+ }
+ case 2: {
+ this._name = layer.name;
+ type = layer.type;
+ break;
+ }
+ default: {
+ throw new caffe.Error(`Unsupported Caffe version '${version}'.`);
+ }
+ }
+ this._type = metadata.type(type) || { name: type };
+
+ let initializers = [];
+ switch (version) {
+ case 0: {
+ for (const name of Object.keys(layer.layer)) {
+ if (name != 'type' && name != 'name' && name != 'blobs' && name != 'blobs_lr') {
+ const value = layer.layer[name];
+ const attribute = new caffe.Attribute(metadata.attribute(type, name), name, value);
+ this._attributes.push(attribute);
+ }
+ }
+ initializers = layer.layer.blobs.map((blob) => new caffe.Tensor(blob));
+ break;
+ }
+ case 1:
+ case 2: {
+ for (const layer_kind of Object.keys(layer)) {
+ if (layer_kind.endsWith('_param') || layer_kind == 'transform_param') {
+ const param = layer[layer_kind];
+ if (type == 'Deconvolution') {
+ type = 'Convolution';
+ }
+ const prototype = Object.getPrototypeOf(param);
+ for (const name of Object.keys(param)) {
+ const defaultValue = prototype[name];
+ const value = param[name];
+ const attribute = new caffe.Attribute(metadata.attribute(type, name), name, value, defaultValue);
+ this._attributes.push(attribute);
+ }
+ }
+ }
+ if (layer.include && layer.include.length > 0) {
+ const attribute = new caffe.Attribute(metadata.attribute(type, 'include'), 'include', layer.include);
+ this._attributes.push(attribute);
+ }
+ if (layer.exclude && layer.exclude.length > 0) {
+ const attribute = new caffe.Attribute(metadata.attribute(type, 'exclude'), 'exclude', layer.exclude);
+ this._attributes.push(attribute);
+ }
+ if (type == 'Data' && layer.input_param && layer.input_param.shape) {
+ const attribute = new caffe.Attribute(metadata.attribute(type, 'shape'), 'shape', layer.input_param.shape);
+ this._attributes.push(attribute);
+ }
+ initializers = layer.blobs.map((blob) => new caffe.Tensor(blob));
+ break;
+ }
+ default: {
+ throw new caffe.Error(`Unsupported Caffe version '${version}'.`);
+ }
+ }
+ this._inputs = [];
+ const inputs = layer.input.concat(initializers);
+ let inputIndex = 0;
+ if (this._type && this._type.inputs) {
+ for (const inputDef of this._type.inputs) {
+ if (inputIndex < inputs.length || inputDef.option != 'optional') {
+ const count = inputDef.option == 'variadic' ? inputs.length - inputIndex : 1;
+ const values = inputs.slice(inputIndex, inputIndex + count).filter((input) => input !== '' || inputDef.option != 'optional').map((input) => {
+ return input instanceof caffe.Tensor ? new caffe.Value('', input.type, input) : value(input, null, null);
+ });
+ const argument = new caffe.Argument(inputDef.name, values);
+ this._inputs.push(argument);
+ inputIndex += count;
+ }
+ }
+ }
+ this._inputs.push(...inputs.slice(inputIndex).map((input) => {
+ return new caffe.Argument(inputIndex.toString(), [
+ input instanceof caffe.Tensor ? new caffe.Value('', input.type, input) : value(input, null, null)
+ ]);
+ }));
+
+ this._outputs = [];
+ const outputs = layer.output;
+ let outputIndex = 0;
+ if (this._type && this._type.outputs) {
+ for (const outputDef of this._type.outputs) {
+ if (outputIndex < outputs.length) {
+ const count = (outputDef.option == 'variadic') ? (outputs.length - outputIndex) : 1;
+ const values = outputs.slice(outputIndex, outputIndex + count).map((output) => value(output, null, null));
+ const argument = new caffe.Argument(outputDef.name, values);
+ this._outputs.push(argument);
+ outputIndex += count;
+ }
+ }
+ }
+ this._outputs.push(...outputs.slice(outputIndex).map((output, index) => {
+ return new caffe.Argument((outputIndex + index).toString(), [ value(output, null, null) ]);
+ }));
+ }
+
+ get type() {
+ return this._type;
+ }
+
+ get name() {
+ return this._name;
+ }
+
+ get inputs() {
+ return this._inputs;
+ }
+
+ get outputs() {
+ return this._outputs;
+ }
+
+ get attributes() {
+ return this._attributes;
+ }
+
+ get chain() {
+ return this._chain;
+ }
+};
+
+caffe.Attribute = class {
+
+ constructor(metadata, name, value, defaultValue) {
+ this._name = name;
+ this._value = value;
+ if (metadata && metadata.type) {
+ this._type = metadata.type;
+ }
+ if (value instanceof caffe.proto.BlobShape) {
+ this._value = new caffe.TensorShape(value.dim.map((dim) => dim.toNumber()));
+ this._type = 'shape';
+ }
+ if (metadata && metadata.visible === false) {
+ this._visible = false;
+ }
+ if (metadata && Object.prototype.hasOwnProperty.call(metadata, 'default')) {
+ defaultValue = metadata.default;
+ }
+ if (defaultValue !== undefined) {
+ if (this._value == defaultValue) {
+ this._visible = false;
+ } else if (Array.isArray(this._value) && Array.isArray(defaultValue)) {
+ if (this._value.length == defaultValue.length &&
+ this._value.every((item, index) => {
+ return item == defaultValue[index];
+ })) {
+ this._visible = false;
+ }
+ }
+ }
+ if (this._type) {
+ this._value = caffe.Utility.enum(this._type, this._value);
+ }
+ }
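+
+ // Attributes whose value matches the schema or prototype default are
+ // marked invisible, so e.g. `axis: 1` on a Concat layer (the default)
+ // stays hidden while `axis: 2` is shown.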
+
+ get type() {
+ return this._type;
+ }
+
+ get name() {
+ return this._name;
+ }
+
+ get value() {
+ return this._value;
+ }
+
+ get visible() {
+ return this._visible !== false;
+ }
+};
+
+caffe.Tensor = class {
+
+ constructor(blob) {
+ let shape = [];
+ if (Object.prototype.hasOwnProperty.call(blob, 'num') &&
+ Object.prototype.hasOwnProperty.call(blob, 'channels') &&
+ Object.prototype.hasOwnProperty.call(blob, 'width') &&
+ Object.prototype.hasOwnProperty.call(blob, 'height')) {
+ if (blob.num != 1) {
+ shape.push(blob.num);
+ }
+ if (blob.channels != 1) {
+ shape.push(blob.channels);
+ }
+ if (blob.height != 1) {
+ shape.push(blob.height);
+ }
+ if (blob.width != 1) {
+ shape.push(blob.width);
+ }
+ } else if (Object.prototype.hasOwnProperty.call(blob, 'shape')) {
+ shape = blob.shape.dim.map((dim) => dim.toNumber());
+ }
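+ // Legacy blobs carry fixed (num, channels, height, width) fields and
+ // singleton dimensions are squeezed out, so a convolution bias stored as
+ // num=1, channels=64, height=1, width=1 surfaces as shape [64]; newer
+ // blobs provide an explicit `shape` message that is used as-is.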
+
+ let dataType = '?';
+ if (blob.data.length > 0) {
+ dataType = 'float32';
+ this._values = blob.data;
+ } else if (blob.double_data.length > 0) {
+ dataType = 'float64';
+ this._values = blob.double_data;
+ }
+
+ this._type = new caffe.TensorType(dataType, new caffe.TensorShape(shape));
+ }
+
+ get category() {
+ return 'Blob';
+ }
+
+ get type() {
+ return this._type;
+ }
+
+ get encoding() {
+ return '|';
+ }
+
+ get values() {
+ return this._values;
+ }
+};
+
+caffe.TensorType = class {
+
+ constructor(dataType, shape) {
+ this._dataType = dataType;
+ this._shape = shape;
+ }
+
+ get dataType() {
+ return this._dataType;
+ }
+
+ get shape() {
+ return this._shape;
+ }
+
+ toString() {
+ return (this.dataType || '?') + this._shape.toString();
+ }
+};
+
+caffe.TensorShape = class {
+
+ constructor(dimensions) {
+ this._dimensions = dimensions;
+ }
+
+ get dimensions() {
+ return this._dimensions;
+ }
+
+ toString() {
+ return this._dimensions ? (`[${this._dimensions.map((dimension) => dimension.toString()).join(',')}]`) : '';
+ }
+};
+
+caffe.Utility = class {
+
+ static layerType(type) {
+ type = type || 0;
+ if (!caffe.Utility._layerTypeMap) {
+ caffe.Utility._layerTypeMap = new Map();
+ const known = { 'BNLL': 'BNLL', 'HDF5': 'HDF5', 'LRN': 'LRN', 'RELU': 'ReLU', 'TANH': 'TanH', 'ARGMAX': 'ArgMax', 'MVN': 'MVN', 'ABSVAL': 'AbsVal' };
+ for (const key of Object.keys(caffe.proto.V1LayerParameter.LayerType)) {
+ const value = caffe.proto.V1LayerParameter.LayerType[key];
+ caffe.Utility._layerTypeMap.set(value, key.split('_').map((item) => known[item] || item.substring(0, 1) + item.substring(1).toLowerCase()).join(''));
+ }
+ }
+ return caffe.Utility._layerTypeMap.has(type) ? caffe.Utility._layerTypeMap.get(type) : type.toString();
+ }
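+
+ // layerType maps V1 enum names to their modern string spellings, e.g.
+ // SOFTMAX_LOSS -> "SoftmaxLoss" and INNER_PRODUCT -> "InnerProduct",
+ // while the `known` table preserves acronym casing such as RELU -> "ReLU"
+ // and TANH -> "TanH".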
+
+ static enum(name, value) {
+ let type = caffe.proto;
+ const parts = name.split('.');
+ while (type && parts.length > 0) {
+ type = type[parts.shift()];
+ }
+ if (type) {
+ caffe.Utility._enumKeyMap = caffe.Utility._enumKeyMap || new Map();
+ if (!caffe.Utility._enumKeyMap.has(name)) {
+ const map = new Map(Object.entries(type).map(([name, value]) => [ value, name ]));
+ caffe.Utility._enumKeyMap.set(name, map);
+ }
+ const map = caffe.Utility._enumKeyMap.get(name);
+ if (map.has(value)) {
+ return map.get(value);
+ }
+ }
+ return value;
+ }
+};
+
+caffe.Error = class extends Error {
+
+ constructor(message) {
+ super(message);
+ this.name = 'Error loading Caffe model.';
+ }
+};
+
+export const ModelFactory = caffe.ModelFactory;
diff --git a/caffe2-metadata.json b/caffe2-metadata.json
new file mode 100644
index 00000000000..666813eaf7e
--- /dev/null
+++ b/caffe2-metadata.json
@@ -0,0 +1,17836 @@
+[
+ {
+ "name": "Abs",
+ "description": "\nCalculates the absolute value of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/abs_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Abs\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [ 0.3005476 1.551666 -1.3591481 0.39191285 -0.21866608]\nY: [0.3005476 1.551666 1.3591481 0.39191285 0.21866608]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Absolute value of input element-wise.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AbsGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Accumulate",
+ "description": "\nAccumulate operator accumulates the input tensor to the output tensor. If the\noutput tensor already has the right size, we add to it; otherwise, we first\ninitialize the output tensor to all zeros, and then do accumulation. Any\nfurther calls to the operator, given that no one else fiddles with the output\nin the interim, will do simple accumulations.\nAccumulation is done using Axpby operation as shown:\n Y = 1*X + gamma*Y\nwhere X is the input tensor, Y is the output tensor and gamma is the multiplier\nargument.\n",
+ "attributes": [
+ {
+ "description": "(float, default 1.0) Accumulation multiplier",
+ "name": "gamma",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The input tensor that has to be accumulated to the output tensor. If the output size is not the same as input size, the output tensor is first reshaped and initialized to zero, and only then, accumulation is done.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Accumulated output tensor",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AccumulateHistogram",
+ "description": "\nThis operator calculate thes histogram of values in input tensor.\nThere're 2 outputs, one for histogram of current input tensor, and another\nfor histogram of the all input tensors accumulated through history.\nThe output would contain num_buckets + 2 values. index[1 ... num_buckets]\nfor values in [lower_bound, upper_bound) interval. And the rest 2 for values\nsmaller than lower_bound or greater than upper_bound respectively.\n",
+ "attributes": [
+ {
+ "description": "the lower bound value",
+ "name": "lower_bound",
+ "option": "optional"
+ },
+ {
+ "description": "the upper bound value",
+ "name": "upper_bound",
+ "option": "optional"
+ },
+ {
+ "description": "number of buckets to use in [lower_bound, upper_bound)",
+ "name": "num_buckets",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output histogram of the current tensor.",
+ "name": "CurHist"
+ },
+ {
+ "description": "Accumulated histogram of the history tensor.",
+ "name": "AccHist"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Accuracy",
+ "description": "\nAccuracy takes two inputs- predictions and labels, and returns a float\naccuracy value for the batch. Predictions are expected in the form of 2-D tensor\ncontaining a batch of scores for various classes, and labels are expected in the\n form of 1-D tensor containing true label indices of samples in the batch. If\nthe score for the label index in the predictions is the highest among all\nclasses, it is considered a correct prediction.\n",
+ "attributes": [
+ {
+ "description": "Count as correct by comparing the true label to the top k scoring classes (default 1: only compare to the top scoring class i.e. argmax)",
+ "name": "top_k",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "2-D tensor (Tensor) of size (num_batches x num_classes) containing scores",
+ "name": "predictions"
+ },
+ {
+ "description": "1-D tensor (Tensor) of size (num_batches) having the indices of true labels",
+ "name": "labels"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1-D tensor (Tensor) of size 1 containing accuracy",
+ "name": "accuracy"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Acos",
+ "description": "\nCalculates the arccosine of the given input tensor, element-wise.\n",
+ "inputs": [
+ {
+ "description": "Input tensor",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The arccosine of the input tensor computed element-wise",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AcosGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Adadelta",
+ "description": "\n\nComputes the AdaDelta update (https://arxiv.org/abs/1212.5701) for an input\ngradient and accumulated history of squared gradients. Concretely, given\ninputs (param, moment, moment_delta, grad, learning_rate), computes:\n\n new_moment = moment * decay + square(grad) * (1 - decay)\n new_grad = sqrt(moment_delta + epsilon) / sqrt(new_moment + epsilon) * grad\n new_param = param + learning_rate * new_grad\n new_moment_delta = moment_delta * decay + square(new_grad) * (1 - decay)\n\nand returns (new_param, new_moment, new_moment_delta).\n\n",
+ "attributes": [
+ {
+ "description": "Default 1e-5",
+ "name": "epsilon",
+ "option": "optional"
+ },
+ {
+ "description": "Default 0.95, the squared gradient sum is decayed by this factor.",
+ "name": "decay",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Parameters to be updated",
+ "name": "param"
+ },
+ {
+ "description": "Average of squared gradients",
+ "name": "moment"
+ },
+ {
+ "description": "Average of squared parameter updates",
+ "name": "moment_delta"
+ },
+ {
+ "description": "Gradient computed",
+ "name": "grad"
+ },
+ {
+ "description": "Learning rate",
+ "name": "lr"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Updated parameters",
+ "name": "output_param"
+ },
+ {
+ "description": "Updated average squared gradient",
+ "name": "output_moment"
+ },
+ {
+ "description": "Updated average of squared parameter updates",
+ "name": "output_moment_delta"
+ }
+ ],
+ "support_level": "default"
+ },
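The update rule above transcribes directly into NumPy. A minimal sketch (a hypothetical helper, not the operator's API; the sign convention follows the formulas above, which assume `lr` already carries the descent sign):

```python
import numpy as np

def adadelta(param, moment, moment_delta, grad, lr, decay=0.95, epsilon=1e-5):
    # new_moment = moment * decay + square(grad) * (1 - decay)
    new_moment = moment * decay + np.square(grad) * (1 - decay)
    # new_grad = sqrt(moment_delta + eps) / sqrt(new_moment + eps) * grad
    new_grad = np.sqrt(moment_delta + epsilon) / np.sqrt(new_moment + epsilon) * grad
    new_param = param + lr * new_grad
    new_moment_delta = moment_delta * decay + np.square(new_grad) * (1 - decay)
    return new_param, new_moment, new_moment_delta
```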
+ {
+ "name": "Adagrad",
+ "description": "\n\nComputes the AdaGrad update for an input gradient and accumulated\nhistory. Concretely, given inputs (param, grad, moment, learning_rate),\ncomputes\n\n new_moment = moment + square(grad)\n effective_lr = learning_rate / (sqrt(new_moment) + epsilon)\n update = learning_rate * grad / (sqrt(new_moment) + epsilon)\n new_param = param + update\nand returns (new_param, new_moment).\n\nOptionally returns effective_lr and update as well.\n\n",
+ "attributes": [
+ {
+ "description": "Default 1e-5",
+ "name": "epsilon",
+ "option": "optional"
+ },
+ {
+ "description": "Default 1. If it is in (0, 1), the gradient square sum is decayed by this factor.",
+ "name": "decay",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Parameters to be updated",
+ "name": "param"
+ },
+ {
+ "description": "Moment history",
+ "name": "moment"
+ },
+ {
+ "description": "Gradient computed",
+ "name": "grad"
+ },
+ {
+ "description": "learning rate",
+ "name": "lr"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Updated parameters",
+ "name": "output_param"
+ },
+ {
+ "description": "Updated moment",
+ "name": "output_moment"
+ },
+ {
+ "description": "(optional) Effective learning rate",
+ "name": "output_effective_lr"
+ },
+ {
+ "description": "(optional) Actual update that is applied.",
+ "name": "output_update"
+ }
+ ],
+ "support_level": "default"
+ },
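Likewise for AdaGrad, a minimal NumPy sketch of the formulas above (hypothetical helper, same sign convention as the schema):

```python
import numpy as np

def adagrad(param, moment, grad, lr, epsilon=1e-5):
    new_moment = moment + np.square(grad)
    effective_lr = lr / (np.sqrt(new_moment) + epsilon)
    update = lr * grad / (np.sqrt(new_moment) + epsilon)
    new_param = param + update
    # effective_lr and update correspond to the optional outputs
    return new_param, new_moment, effective_lr, update
```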
+ {
+ "name": "Adam",
+ "description": "\n\nComputes the Adam update (https://arxiv.org/abs/1412.6980) for an\ninput gradient and momentum parameters. Concretely, given inputs\n(param, m1, m2, grad, lr, iters),\n\n t = iters + 1\n correction_multiplier = sqrt(1 - power(beta2, t)) /\n (1 - power(beta1, t))\n m1_o = (beta1 * m1) + (1 - beta1) * grad\n m2_o = (beta2 * m2) + (1 - beta2) * np.square(grad)\n grad_o = correction_multiplier * m1_o / \\\n (sqrt(m2_o) + epsilon)\n param_o = param + lr * grad_o\n\nand returns (param_o, m1_o, m2_o, grad_o), in which grad_o is an optional output\n\n",
+ "attributes": [
+ {
+ "description": "Default 0.9",
+ "name": "beta1",
+ "option": "optional"
+ },
+ {
+ "description": "Default 0.999",
+ "name": "beta2",
+ "option": "optional"
+ },
+ {
+ "description": "Default 1e-5",
+ "name": "epsilon",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Parameters to be updated",
+ "name": "param"
+ },
+ {
+ "description": "First moment history",
+ "name": "moment_1"
+ },
+ {
+ "description": "Second moment history",
+ "name": "moment_2"
+ },
+ {
+ "description": "Gradient computed",
+ "name": "grad"
+ },
+ {
+ "description": "learning rate",
+ "name": "lr"
+ },
+ {
+ "description": "iteration number",
+ "name": "iter"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Updated parameters",
+ "name": "output_param"
+ },
+ {
+ "description": "Updated first moment",
+ "name": "output_moment_1"
+ },
+ {
+ "description": "Updated second moment",
+ "name": "output_moment_2"
+ },
+ {
+ "description": "Optional Effective gradient",
+ "name": "output_grad"
+ }
+ ],
+ "support_level": "default"
+ },
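And the Adam formulas above, transcribed as a NumPy sketch (hypothetical helper; `iters` is the iteration counter input):

```python
import numpy as np

def adam(param, m1, m2, grad, lr, iters, beta1=0.9, beta2=0.999, epsilon=1e-5):
    t = iters + 1
    correction = np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    m1_o = beta1 * m1 + (1 - beta1) * grad
    m2_o = beta2 * m2 + (1 - beta2) * np.square(grad)
    grad_o = correction * m1_o / (np.sqrt(m2_o) + epsilon)   # optional output
    param_o = param + lr * grad_o
    return param_o, m1_o, m2_o, grad_o
```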
+ {
+ "name": "Add",
+ "description": "\nPerforms element-wise binary addition (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Add\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[1,2],[3,4]]))\nworkspace.FeedBlob(\"B\", np.array([[5,6],[7,8]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[1 2]\n [3 4]]\nB:\n[[5 6]\n [7 8]]\nC:\n[[ 6 8]\n [10 12]]\n\n```\n\n \n\n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AddGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "AddPadding",
+ "description": "\nGiven a partitioned tensor $T$, where the partitions are\ndefined as ranges on its outer-most (slowest varying) dimension $N$,\nreturn a tensor $T<(N + 2 * padding\\_width), D_1, ..., D_n>$ with paddings\nadded to the start and end of each range.\n\nOptionally, different paddings can be provided for beginning and end.\nPaddings provided must be a tensor $T$. If no padding is\nprovided, add zero padding. If no lengths vector is provided, add padding\nonly once, at the start and end of data.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sequence_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AddPadding\",\n [\"X\", \"lengths\"],\n [\"Y\", \"lengths_out\"],\n padding_width=1\n\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,2,2).astype(np.float32)))\nworkspace.FeedBlob(\"lengths\", np.array([3]).astype(np.int32))\n\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"lengths_out:\", workspace.FetchBlob(\"lengths_out\"))\n```\n\n**Result**\n\n```\nX: [[[0.2531572 0.4588472 ]\n [0.45140603 0.61161053]]\n\n [[0.92500854 0.8045306 ]\n [0.03356671 0.30233648]]\n\n [[0.4660227 0.6287745 ]\n [0.79372746 0.08609265]]]\nY: [[[0. 0. ]\n [0. 0. ]]\n\n [[0.2531572 0.4588472 ]\n [0.45140603 0.61161053]]\n\n [[0.92500854 0.8045306 ]\n [0.03356671 0.30233648]]\n\n [[0.4660227 0.6287745 ]\n [0.79372746 0.08609265]]\n\n [[0. 0. ]\n [0. 0. ]]]\nlengths_out: [5]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "Number of copies of padding to add around each range.",
+ "name": "padding_width",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "description": "[OPTIONAL] Specifies a different end-padding width. If this is not set, will use same as `padding_width`.",
+ "name": "end_padding_width",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor)* Input data ($T$).",
+ "name": "data_in"
+ },
+ {
+ "description": "*(type: Tensor``)* Number of elements in each range. sum(lengths) = N.",
+ "name": "lengths"
+ },
+ {
+ "description": "*(type: Tensor``)* [OPTIONAL] Padding data for range start ($T$).",
+ "name": "start_padding"
+ },
+ {
+ "description": "*(type: Tensor``)* [OPTIONAL] Padding for range end. If not provided, `start_padding` is used ($T$).",
+ "name": "end_padding"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor)* Padded data tensor ($T$).",
+ "name": "data_out"
+ },
+ {
+ "description": "*(type: Tensor``)* [OPTIONAL] Lengths for each padded range.",
+ "name": "lengths_out"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AffineChannel",
+ "category": "Normalization",
+ "description": "\nApplies a separate affine transformation to each channel of the input. Useful\nfor replacing spatial batch norm with its equivalent fixed transformation.\n",
+ "inputs": [
+ {
+ "description": "Feature map input with order NCHW or NHWC.",
+ "name": "X"
+ },
+ {
+ "description": "1D input of shape (C); the c-th element is the scale factor of the affine transformation for the c-th channel of the input.",
+ "name": "scale"
+ },
+ {
+ "description": "1D input of shape (C); the c-th element is the bias of the affine transformation for the c-th channel of the input.",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output with the same order of Input.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AffineChannelGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Alias",
+ "description": "\nMakes the output and the input share the same underlying storage.\n\nWARNING: in general, in caffe2's operator interface different tensors should\nhave different underlying storage, which is the assumption made by\ncomponents such as the dependency engine and memory optimization. Thus, in\nnormal situations you should not use the AliasOp, especially in a normal\nforward-backward pass.\n\nThe Alias op is provided so one can achieve true asynchrony, such as\nHogwild, in a graph. But make sure you understand all the implications\nsimilar to multi-thread computation before you use it explicitly.\n",
+ "inputs": [
+ {
+ "description": "Input tensor whose storage will be shared.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor of same shape as input, sharing its storage.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AliasWithName",
+ "description": "\nSimilar with AliasOp, storing the alias name as operator argument.\n",
+ "attributes": [
+ {
+ "description": "name of the aliasing",
+ "name": "name",
+ "option": "optional"
+ },
+ {
+ "description": "weather or not to alias forward or backward",
+ "name": "is_backward",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor whose storage will be shared.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor of same shape as input, sharing its storage.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Allgather",
+ "description": "\nDoes an allgather operation among the nodes.\n",
+ "inputs": [
+ {
+ "description": "The common world.",
+ "name": "comm_world"
+ },
+ {
+ "description": "A tensor to be allgathered.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The allgathered tensor, same on all nodes.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Allreduce",
+ "description": "\nDoes an allreduce operation among the nodes. Currently only Sum is supported.\n",
+ "inputs": [
+ {
+ "description": "The common world.",
+ "name": "comm_world"
+ },
+ {
+ "description": "A tensor to be allreduced.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The allreduced tensor, same on all nodes.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "And",
+ "description": "\nPerforms element-wise logical operation **and** (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"And\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", (np.random.rand(3, 3) > 0.5))\nworkspace.FeedBlob(\"B\", (np.random.rand(3, 3) > 0.5))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n [[ True False False]\n [False True False]\n [False False True]]\nB:\n [[ True False True]\n [False False False]\n [False False False]]\nC:\n [[ True False False]\n [False False False]\n [False False False]]\n\n```\n\n \n\n ",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* First operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor of booleans. Has same dimensions as input `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "APMeter",
+ "description": "\nAPMeter computes Average Precision for binary or multi-class classification.\nIt takes two inputs: prediction scores P of size (n_samples x n_classes), and\ntrue labels Y of size (n_samples x n_classes). It returns a single float number\nper class for the average precision of that class.\n",
+ "attributes": [
+ {
+ "description": "(int32_t) indicates how many predictions should the op buffer. defaults to 1000",
+ "name": "buffer_size",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "2-D tensor (Tensor) of size (num_samples xnum_classes) containing prediction scores",
+ "name": "predictions"
+ },
+ {
+ "description": "2-D tensor (Tensor) of size (num_samples) containing true labels for each sample",
+ "name": "labels"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1-D tensor (Tensor) of size num_classes containing average precision for each class",
+ "name": "AP"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Append",
+ "description": "\nAppend input `B` to the end of input `A`.\n\n- It is required that this operation run in-place, meaning that the input `A` blob must match the output blob.\n- All except the outer-most dimension must be the same between `A` and `B`.\n- Input `A` may have to be re-allocated in order for accommodate to the new size. Currently, an exponential growth ratio is used in order to ensure amortized constant time complexity.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dataset_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Append\",\n [\"A\", \"B\"],\n [\"A\"],\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(1,3,3)))\nworkspace.FeedBlob(\"B\", np.random.randint(10, size=(2,3,3)))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"A:\", workspace.FetchBlob(\"A\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[[3 8 7]\n [1 6 6]\n [5 0 6]]]\nB:\n[[[4 3 1]\n [7 9 6]\n [9 4 5]]\n\n [[7 7 4]\n [9 8 7]\n [1 6 6]]]\nA:\n[[[3 8 7]\n [1 6 6]\n [5 0 6]]\n\n [[4 3 1]\n [7 9 6]\n [9 4 5]]\n\n [[7 7 4]\n [9 8 7]\n [1 6 6]]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "(*Tensor*): base input tensor of shape $(N, d_1, d_2, ..., d_n)$",
+ "name": "A"
+ },
+ {
+ "description": "(*Tensor*): second input tensor of shape $(M, d_1, d_2, ..., d_n)$ to be appended to the base",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(*Tensor*): output tensor of shape $(N+M, d_1, d_2, ..., d_n)$",
+ "name": "A"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ArgMax",
+ "description": "\nRetrieve the argmax of an axis dimension specified by the `axis`\nargument. Given an input tensor and two arguments (`axis` and\n`keepdims`), returns a tensor containing the indices of the largest\nelement along the given axis. If the `keepdims` arg is *True* (default),\nthe shape of the output tensor matches the input tensor except the\n`axis` dimension equals 1. Else, the `axis` dimension of the output\ntensor is removed.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/arg_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ArgMax\",\n [\"X\"],\n [\"Indices\"],\n axis=2,\n keepdims=False\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(3,3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Indices:\", workspace.FetchBlob(\"Indices\"))\n\n```\n\n**Result**\n\n```\nX: [[[4. 9. 6.]\n [6. 6. 1.]\n [9. 5. 4.]]\n\n [[6. 7. 4.]\n [7. 9. 1.]\n [3. 2. 8.]]\n\n [[3. 4. 6.]\n [5. 2. 7.]\n [1. 5. 7.]]]\nIndices: [[1 0 0]\n [1 1 2]\n [2 2 2]]\n\n```\n\n \n\n ",
+ "attributes": [
+ {
+ "default": -1,
+ "description": "The axis to get argmax.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": true,
+ "description": "If True (default), the output tensor shape will match the input tensor shape except the `axis` dimension equals 1. Else, the `axis` dimension of the output tensor is removed.",
+ "name": "keepdims",
+ "option": "optional",
+ "type": "boolean"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Tensor of indices for the largest values.",
+ "name": "Indices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ArgMin",
+ "description": "\nRetrieve the argmin of an axis dimension specified by the `axis`\nargument. Given an input tensor and two arguments (`axis` and\n`keepdims`), returns a tensor containing the indices of the smallest\nelement along the given axis. If the `keepdims` arg is *True* (default),\nthe shape of the output tensor matches the input tensor except the\n`axis` dimension equals 1. Else, the `axis` dimension of the output\ntensor is removed.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/arg_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ArgMin\",\n [\"X\"],\n [\"Indices\"],\n axis=1\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(10, size=(5,5))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Indices:\", workspace.FetchBlob(\"Indices\"))\n\n```\n\n**Result**\n\n```\n\nX: [[9. 4. 6. 4. 1.]\n [5. 9. 8. 3. 4.]\n [6. 1. 0. 2. 9.]\n [7. 8. 2. 4. 9.]\n [3. 9. 4. 9. 4.]]\nIndices: [[4]\n [3]\n [2]\n [2]\n [0]]\n\n```\n\n \n\n ",
+ "attributes": [
+ {
+ "default": -1,
+ "description": "The axis to get argmin.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": true,
+ "description": "If True (default), the output tensor shape will match the input tensor shape except the `axis` dimension equals 1. Else, the `axis` dimension of the output tensor is removed.",
+ "name": "keepdims",
+ "option": "optional",
+ "type": "boolean"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Tensor of indices for the smallest values.",
+ "name": "Indices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Asin",
+ "description": "\nCalculates the arcsine of the given input tensor, element-wise.\n",
+ "inputs": [
+ {
+ "description": "Input tensor",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The arcsine of the input tensor computed element-wise",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AsinGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Assert",
+ "description": "\nTakes in a tensor of type *bool*, *int*, *long*, or *long long* and checks if all values are True when coerced into a boolean. In other words, for non-bool types this asserts that all values in the tensor are non-zero. If a value is False after coerced into a boolean, the operator throws an error. Else, if all values are True, nothing is returned. For tracability, a custom error message can be set using the `error_msg` argument.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/assert_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Assert\",\n [\"A\"],\n [],\n error_msg=\"Failed assertion from Assert operator\"\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(3,3)).astype(np.int32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\ntry:\n workspace.RunOperatorOnce(op)\nexcept RuntimeError:\n print(\"Assertion Failed!\")\nelse:\n print(\"Assertion Passed!\")\n\n```\n\n**Result**\n\n```\n\nA:\n[[7 5 6]\n [1 2 4]\n [5 3 7]]\nAssertion Passed!\n\n```\n\n \n\n ",
+ "attributes": [
+ {
+ "description": "(*string*): custom error message to be thrown when the input does not pass assertion",
+ "name": "error_msg",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "(*Tensor*): input tensor",
+ "name": "X"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AsyncNetBarrier",
+ "description": "\nThis is a pretty much no-op operator, since it's only purposes is make sure that\nasync_scheduling will schedule certian operations earlier than others.\n\nExaple where this operator can work well - mixture of data-parallel and model-\nparallel training, where one wants to force that all copies are started before\ndata-parallel part starts.\n",
+ "attributes": [
+ {
+ "description": "Specifies either inputs should be across different devices in dev inference options",
+ "name": "cross_device",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Atan",
+ "description": "\nCalculates the arctangent of the given input tensor, element-wise.\n",
+ "inputs": [
+ {
+ "description": "Input tensor",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The arctangent of the input tensor computed element-wise",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AtanGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "ATen",
+ "support_level": "contribution"
+ },
+ {
+ "name": "AtomicAppend",
+ "support_level": "default"
+ },
+ {
+ "name": "AtomicFetchAdd",
+ "description": "\nGiven a mutex and two int32 scalar tensors, performs an atomic fetch add\nby mutating the first argument and adding it to the second input\nargument. Returns the updated integer and the value prior to the update.\n",
+ "inputs": [
+ {
+ "description": "Blob containing to a unique_ptr",
+ "name": "mutex_ptr"
+ },
+ {
+ "description": "Value to be mutated after the sum.",
+ "name": "mut_value"
+ },
+ {
+ "description": "Value to add to the first operand.",
+ "name": "increment"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Mutated value after sum. Usually same as input 1.",
+ "name": "mut_value"
+ },
+ {
+ "description": "Value of the first operand before sum.",
+ "name": "fetched_value"
+ }
+ ],
+ "support_level": "default"
+ },
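Outside of caffe2, the same fetch-add contract can be sketched with a plain Python lock; `FetchAdd` here is a hypothetical stand-in for the mutex-plus-counter pair, returning both the updated and the prior value:

```python
import threading

class FetchAdd:
    def __init__(self, value=0):
        self._mutex = threading.Lock()
        self.value = value

    def fetch_add(self, increment):
        with self._mutex:
            fetched = self.value        # value prior to the update
            self.value += increment     # mutate the counter under the lock
            return self.value, fetched  # (mut_value, fetched_value)

counter = FetchAdd()
print(counter.fetch_add(5))  # (5, 0)
```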
+ {
+ "name": "AtomicFetchAdd64",
+ "description": "\nLike, AtomicFetchAdd but with int64_t scalar tensors,\nperforms an atomic fetch add\nby mutating the first argument and adding it to the second input\nargument. Returns the updated integer and the value prior to the update.\n",
+ "inputs": [
+ {
+ "description": "Blob containing to a unique_ptr",
+ "name": "mutex_ptr"
+ },
+ {
+ "description": "Value to be mutated after the sum.",
+ "name": "mut_value"
+ },
+ {
+ "description": "Value to add to the first operand.",
+ "name": "increment"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Mutated value after sum. Usually same as input 1.",
+ "name": "mut_value"
+ },
+ {
+ "description": "Value of the first operand before sum.",
+ "name": "fetched_value"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AtomicIter",
+ "description": "\nSimilar to Iter, but takes a mutex as the first input to make sure that\nupdates are carried out atomically. This can be used in e.g. Hogwild sgd\nalgorithms.\n",
+ "inputs": [
+ {
+ "description": "The mutex used to do atomic increment.",
+ "name": "mutex"
+ },
+ {
+ "description": "The iter counter as an int64_t TensorCPU.",
+ "name": "iter"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AveragedLoss",
+ "description": "\nThe *AveragedLoss* op takes a single 1-D input tensor *input* and returns a single output float value *output*. The output represents the average of the values in *input*. This op is commonly used for averaging losses, hence the name, however it does not exclusively operate on losses.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/loss_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/loss_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragedLoss\",\n [\"input\"],\n [\"output\"],\n)\n\nworkspace.FeedBlob(\"input\", np.array([8, 10, 12]).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output: \\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [ 8. 10. 12.]\noutput:\n 10.0\n\n```\n\n \n\n\n",
+ "inputs": [
+ {
+ "description": "The input data as Tensor",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The output tensor of size 1 containing the averaged value.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AveragedLossGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePool",
+ "category": "Pool",
+ "description": "AveragePool \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output data tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePool1D",
+ "description": "AveragePool1D \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output data tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePool1DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePool2D",
+ "description": "AveragePool2D \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output data tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePool2DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePool3D",
+ "description": "AveragePool3D \nconsumes an input blob and applies average pooling across the the blob according\nto kernel sizes, stride sizes, pad lengths and dilation. Average pooling consists\nof taking the average value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"AveragePool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-0.2883434 0.43498734 0.05417408 1.912558 0.09390241\n -0.33173105]\n [ 1.633709 1.2047161 0.36964908 0.99961185 0.4184147\n 0.9989975 ]\n [ 1.7644193 0.1789665 1.5812988 -0.6038542 -0.36090398\n 0.33195344]\n [ 0.9457722 -0.95174325 -0.78124577 1.2062047 1.1903144\n 0.2586746 ]\n [ 1.252104 0.32645547 1.8073524 -0.78397465 0.9978303\n -0.97614396]\n [ 0.5440196 1.5778259 -0.76750124 0.5051756 0.8838398\n -0.37085298]]]]\n\nY:\n [[[[0.7462672 0.83399826 0.2948959 ]\n [0.4843537 0.3506009 0.35500962]\n [0.9251013 0.19026303 0.13366827]]]]\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output data tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePool3DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePoolGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "AveragePut",
+ "description": "\n Consume a value and pushes it to the global stat registry as an average.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_put_ops.cc\n\n ",
+ "attributes": [
+ {
+ "description": "(*str*): name of the stat. If not present, then uses name of input blob",
+ "name": "name",
+ "option": "optional"
+ },
+ {
+ "description": "(*int64_t*): number to multiply input values by (used when inputting floats, as stats can only receive integers",
+ "name": "magnitude_expand",
+ "option": "optional"
+ },
+ {
+ "description": "(*boolean*): whether or not to clamp inputs to the max inputs allowed",
+ "name": "bound",
+ "option": "optional"
+ },
+ {
+ "description": "(*float*): Optionally provide a default value for receiving empty tensors",
+ "name": "default_value",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "(*Tensor``*): A scalar tensor, representing any numeric value",
+ "name": "value"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Barrier",
+ "description": "\nDoes a barrier operation among the nodes.\n",
+ "inputs": [
+ {
+ "description": "The common world.",
+ "name": "comm_world"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BatchBoxCox",
+ "description": "\nInput `data` is a N * D matrix. Apply box-cox transform for each column.\n`lambda1` and `lambda2` is of size D that defines the hyper-parameters for\nthe transform of each column `x` of the input `data`:\n\n ln(x + lambda2), if lambda1 == 0\n ((x + lambda2)^lambda1 - 1)/lambda1, if lambda1 != 0\n\n",
+ "inputs": [
+ {
+ "description": "input float or double N * D matrix",
+ "name": "data"
+ },
+ {
+ "description": "tensor of size D with the same type as data",
+ "name": "lambda1"
+ },
+ {
+ "description": "tensor of size D with the same type as data",
+ "name": "lambda2"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "output matrix that applied box-cox transform",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
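The per-column transform above can be sketched in NumPy. A minimal illustration (hypothetical helper; it assumes `x + lambda2 > 0` everywhere, as the transform requires):

```python
import numpy as np

def batch_box_cox(data, lambda1, lambda2):
    shifted = data + lambda2                         # broadcasts over columns
    safe_l1 = np.where(lambda1 == 0, 1.0, lambda1)   # avoid divide-by-zero below
    return np.where(lambda1 == 0,
                    np.log(shifted),                              # lambda1 == 0 branch
                    (np.power(shifted, safe_l1) - 1.0) / safe_l1)  # lambda1 != 0 branch
```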
+ {
+ "name": "BatchBucketize",
+ "description": "\nBucketize the float_features into sparse features.\nThe float_features is a N * D tensor where N is the batch_size, and D is the feature_dim.\nThe indices is a 1D tensor containing the indices of the features that need to be bucketized.\nThe lengths is a 1D tensor that splits the following 'boundaries' argument.\nThe boundaries is a 1D tensor containing the border list for each feature.\n\nWith in each batch, `indices` should not have duplicate number,\nand the number of elements in `indices` should be less than or equal to `D`.\nEach element in `lengths` vector (lengths[`i`]) represents\nthe number of boundaries in the sub border list.\nThe sum of all elements in `lengths` must be equal to the size of `boundaries`.\nIf lengths[0] = 2, the first sub border list is [0.5, 1.0], which separate the\nvalue to (-inf, 0.5], (0,5, 1.0], (1.0, inf). The bucketized feature will have\nthree possible values (i.e. 0, 1, 2).\n\n\nFor example, with input:\n\n float_features = [[1.42, 2.07, 3.19, 0.55, 4.32],\n [4.57, 2.30, 0.84, 4.48, 3.09],\n [0.89, 0.26, 2.41, 0.47, 1.05],\n [0.03, 2.97, 2.43, 4.36, 3.11],\n [2.74, 5.77, 0.90, 2.63, 0.38]]\n indices = [0, 1, 4]\n lengths = [2, 3, 1]\n boundaries = [0.5, 1.0, 1.5, 2.5, 3.5, 2.5]\n\nThe output is:\n\n output =[[2, 1, 1],\n [2, 1, 1],\n [1, 0, 0],\n [0, 2, 1],\n [2, 3, 0]]\n\nafter running this operator.\n",
+ "inputs": [
+ {
+ "description": "2-D dense tensor, the second dimension must be greater or equal to the indices dimension",
+ "name": "float_features"
+ },
+ {
+ "description": "Flatten tensor, containing the indices of `float_features` to be bucketized. The datatype must be int32.",
+ "name": "indices"
+ },
+ {
+ "description": "Flatten tensor, the size must be equal to that of `indices`. The datatype must be int32.",
+ "name": "lengths"
+ },
+ {
+ "description": "Flatten tensor, dimension has to match the sum of lengths",
+ "name": "boundaries"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "2-D dense tensor, with 1st dim = float_features.dim(0), 2nd dim = size(indices)in the arg list, the tensor is of the same data type as `feature`.",
+ "name": "bucktized_feat"
+ }
+ ],
+ "support_level": "default"
+ },
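The right-inclusive bucket rule above matches `np.digitize(..., right=True)`; a minimal NumPy sketch (hypothetical helper, not the operator's API) that reproduces the worked example:

```python
import numpy as np

def batch_bucketize(float_features, indices, lengths, boundaries):
    out = np.empty((float_features.shape[0], len(indices)), dtype=np.int32)
    offset = 0
    for j, (col, n) in enumerate(zip(indices, lengths)):
        borders = boundaries[offset:offset + n]   # sub border list for this feature
        # right=True gives the right-inclusive buckets described above
        out[:, j] = np.digitize(float_features[:, col], borders, right=True)
        offset += n
    return out
```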
+ {
+ "name": "BatchBucketOneHot",
+ "description": "\nInput is a matrix tensor. Its first dimension is the batch\nsize. For each column, bucketize it based on the boundary values and then do\none hot encoding. The `lengths` specifies the number of boundary values for each\ncolumn. The final number of buckets is this number plus 1. This would also be\nthe expanded feature size. `boundaries` specifies all the boundary values.\nNote that each bucket is right-inclusive. That is, given boundary values\n[b1, b2, b3], the buckets are defined as (-int, b1], (b1, b2], (b2, b3], (b3, inf).\nFor example\n\n data = [[2, 3], [4, 1], [2, 5]], lengths = [2, 3],\n If boundaries = [0.1, 2.5, 1, 3.1, 4.5], then\n output = [[0, 1, 0, 0, 1, 0, 0], [0, 0, 1, 1, 0, 0, 0], [0, 1, 0, 0, 0, 0, 1]]\n\n If boundaries = [0.1, 2.5, 1, 1, 3.1], then\n output = [[0, 1, 0, 0, 0, 1, 0], [0, 0, 1, 0, 1, 0, 0], [0, 1, 0, 0, 0, 0, 1]]\n\n",
+ "inputs": [
+ {
+ "description": "input tensor matrix",
+ "name": "data"
+ },
+ {
+ "description": "the size is the same as the width of the `data`",
+ "name": "lengths"
+ },
+ {
+ "description": "bucket boundaries",
+ "name": "boundaries"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "output matrix that expands each input column with one hot encodingbased on the bucketization",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BatchDenseToSparse",
+ "description": "\nThis Op is a inverse of BatchSparseToDenseOp.\nBasically, given a `lengths` vector, a `indices` vector,\nand a dense matrix `dense`, output `value` vector so that, along with\n`lengths` vector and `indices` vector, forms a sparse representation\nof the dense matrix.\n\nA sparse matrix is represented by `lengths` vector, `indices` vector,\nand `values` vector. Each element in `lengths` vector (lengths[`i`]) represents\nthe number of indices in this batch (batch `i`).\nWith in each batch, `indices` should not have duplicate number.\n\nFor example, with input:\n\n lengths = [2, 3, 1]\n indices = [0, 1, 2, 3, 4, 5]\n output = [[6, 7, 0, 0, 0, 0],\n [0, 0, 8, 9, 10, 0],\n [0, 0, 0, 0, 0, 11]]\n\nThe output is:\n\n values = [6, 7, 8, 9, 10, 11]\n\nafter running this operator.\n",
+ "inputs": [
+ {
+ "description": "Flatten lengths, Used to break down indices into per batch indices",
+ "name": "lengths"
+ },
+ {
+ "description": "Flatten indices, tensor of total size = \\sum lengths, containing the indices ",
+ "name": "indices"
+ },
+ {
+ "description": "dense 2-D tensor, first dim = len(lengths), last dim > Any(indices)",
+ "name": "dense"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Values, tensor of the same size as `indices` and same data type as dense tensor.",
+ "name": "values"
+ }
+ ],
+ "support_level": "default"
+ },
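The inverse gather above is a few lines of NumPy. A minimal sketch (hypothetical helper) that reproduces the worked example:

```python
import numpy as np

def batch_dense_to_sparse(lengths, indices, dense):
    values, offset = [], 0
    for i, n in enumerate(lengths):
        # pick row i's entries at this batch's indices
        values.append(dense[i, indices[offset:offset + n]])
        offset += n
    return np.concatenate(values)

dense = np.array([[6, 7, 0, 0, 0, 0],
                  [0, 0, 8, 9, 10, 0],
                  [0, 0, 0, 0, 0, 11]])
print(batch_dense_to_sparse([2, 3, 1], np.array([0, 1, 2, 3, 4, 5]), dense))
# [ 6  7  8  9 10 11]
```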
+ {
+ "name": "BatchGather",
+ "description": "\nBatch gather operation, first dimension in DATA is the batch size.\nGiven DATA tensor of rank r >= 2, and INDICES tensor of rank q >= 1, gather\nentries of the second outer dimension (axis == 1) of DATA indexed by INDICES,\nand concatenate them in an output tensor of rank q + (r - 1).\n\nExample:\n DATA = [\n [1.0, 1.2, 2.4, 4.5],\n [2.3, 3.4, 3.6, 2.3],\n [4.5, 5.7, 1.2, 4.5],\n ]\n INDICES = [0, 2]\n\n OUTPUT = [\n [1.0, 2.4],\n [2.3, 3.6],\n [4.5, 1.2],\n ]\n",
+ "inputs": [
+ {
+ "description": "Tensor of rank r >= 2.",
+ "name": "DATA"
+ },
+ {
+ "description": "Tensor of int32/int64 indices, of any rank q.",
+ "name": "INDICES"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor of rank q + (r - 1).",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
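For the common rank-2 case, the gather along axis 1 above is exactly `np.take`; this reproduces the example from the description:

```python
import numpy as np

DATA = np.array([[1.0, 1.2, 2.4, 4.5],
                 [2.3, 3.4, 3.6, 2.3],
                 [4.5, 5.7, 1.2, 4.5]])
INDICES = np.array([0, 2])

# gather along axis 1 while keeping the batch (first) dimension
OUTPUT = np.take(DATA, INDICES, axis=1)
print(OUTPUT)  # [[1.  2.4] [2.3 3.6] [4.5 1.2]]
```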
+ {
+ "name": "BatchGatherGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "BatchMatMul",
+ "description": "\nBatch Matrix multiplication Yi = Ai * Bi, where A has shape (dim0, dim1, ... M, K),\nB has shape (dim0, dim1, ... K, N), Y has shape (dim0, dim1, ... M, N) and i ranges\nfrom 0 to (dim0 * dim1 ...) - 1. rank(A) == rank(B) >= 2. In case of A and B being\ntwo dimensional, it behaves like normal matrix multiplication.\n",
+ "attributes": [
+ {
+ "description": "Pass 1 to transpose the last two dimensions of A before doing multiplication",
+ "name": "trans_a",
+ "option": "optional"
+ },
+ {
+ "description": "Pass 1 to transpose the last two dimensions of B before doing multiplication",
+ "name": "trans_b",
+ "option": "optional"
+ },
+ {
+ "description": "Pass 1 to allow broadcasting of dimensions. Behavior is the same as numpy.matmul. Gradient is currently not supported when running in broadcast mode.",
+ "name": "broadcast",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "tensor of shape (dim0, dim1 ... M, K)",
+ "name": "A"
+ },
+ {
+ "description": "tensor of shape (dim0, dim1 ... K, N)",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "tensor of shape (dim0, dim1 ... M, N)",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
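A minimal NumPy sketch of the batched product and the `trans_a`/`trans_b` attributes (hypothetical helper; `np.matmul` already batches over all leading dimensions):

```python
import numpy as np

def batch_matmul(A, B, trans_a=0, trans_b=0):
    if trans_a:
        A = np.swapaxes(A, -1, -2)   # transpose the last two dims of A
    if trans_b:
        B = np.swapaxes(B, -1, -2)
    return np.matmul(A, B)

A = np.random.rand(4, 2, 3, 5)       # (dim0, dim1, M, K)
B = np.random.rand(4, 2, 5, 7)       # (dim0, dim1, K, N)
print(batch_matmul(A, B).shape)      # (4, 2, 3, 7)
```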
+ {
+ "name": "BatchMoments",
+ "support_level": "default"
+ },
+ {
+ "name": "BatchMomentsGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "BatchOneHot",
+ "description": "\nInput is a matrix tensor. Its first dimension is the batch\nsize. Expand each column of it using one hot encoding. The `lengths` specifies\nthe size of each column after encoding, and the `values` is the dictionary value\nof one-hot encoding for each column. For example\n\n If data = [[2, 3], [4, 1], [2, 5]], lengths = [2, 3],\n and values = [2, 4, 1, 3, 5], then\n\n output = [[1, 0, 0, 1, 0], [0, 1, 1, 0, 0], [1, 0, 0, 0, 1]]\n",
+ "inputs": [
+ {
+ "description": "input tensor matrix",
+ "name": "data"
+ },
+ {
+ "description": "the size is the same as the width of the `data`",
+ "name": "lengths"
+ },
+ {
+ "description": "one hot encoding dictionary values",
+ "name": "values"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "output matrix that expands each input column with one hot encoding",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
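The per-column dictionary encoding above, sketched in NumPy (hypothetical helper); it reproduces the worked example:

```python
import numpy as np

def batch_one_hot(data, lengths, values):
    cols, offset = [], 0
    for j, n in enumerate(lengths):
        vocab = values[offset:offset + n]   # dictionary values for column j
        cols.append((data[:, j:j + 1] == vocab[None, :]).astype(np.int64))
        offset += n
    return np.concatenate(cols, axis=1)

data = np.array([[2, 3], [4, 1], [2, 5]])
print(batch_one_hot(data, [2, 3], np.array([2, 4, 1, 3, 5])))
# [[1 0 0 1 0]
#  [0 1 1 0 0]
#  [1 0 0 0 1]]
```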
+ {
+ "name": "BatchPermutation",
+ "description": "\nBatch permutation of an input tensor X given input indices. First dimension of\nX equals batch size N. The indices stores a be permutation of N.\nThe output Y is a tensor of same shape as X, with data re-ordered according to\nthe indices within the batch size.\n\nExample of batch permutation on a 2-D tensor with batch size 4:\n X = [\n [1, 5, 2, 3, 4, 6, 0],\n [4, 3, 3, 5, 2, 3, 1],\n [2, 2, 3, 6, 0, 0, 1],\n [0, 0, 1, 1, 2, 2, 3]\n ]\n indices = [2, 0, 1, 3]\n Y = [\n [2, 2, 3, 6, 0, 0, 1],\n [1, 5, 2, 3, 4, 6, 0],\n [4, 3, 3, 5, 2, 3, 1],\n [0, 0, 1, 1, 2, 2, 3]\n ]\n\nExample of batch permutation on a 3-D tensor with batch size 4:\n X = [\n [[1, 5, 2], [3, 4, 6, 0]],\n [[4, 3, 3], [5, 2, 3, 1]],\n [[2, 2, 3], [6, 0, 0, 1]],\n [[0, 0, 1], [1, 2, 2, 3]]\n ]\n indices = [2, 0, 1, 3]\n Y = [\n [[2, 2, 3], [6, 0, 0, 1]],\n [[1, 5, 2], [3, 4, 6, 0]],\n [[4, 3, 3], [5, 2, 3, 1]],\n [[0, 0, 1], [1, 2, 2, 3]]\n ]\n",
+ "inputs": [
+ {
+ "description": "Input tensor, where 1st dimension equals batch size",
+ "name": "X"
+ },
+ {
+ "description": "Input indices of batch to permute",
+ "name": "indices"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output permuted tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
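In NumPy terms the whole operator is fancy indexing along the batch axis; this reproduces the 2-D example above:

```python
import numpy as np

X = np.array([[1, 5, 2, 3, 4, 6, 0],
              [4, 3, 3, 5, 2, 3, 1],
              [2, 2, 3, 6, 0, 0, 1],
              [0, 0, 1, 1, 2, 2, 3]])
indices = np.array([2, 0, 1, 3])

Y = X[indices]   # rows re-ordered along the batch dimension
```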
+ {
+ "name": "BatchPermutationGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "BatchSparseToDense",
+ "description": "\nConvert sparse matrix representation into dense matrix.\n\nA sparse matrix is represented by `lengths` vector, `indices` vector,\nand `values` vector. Each element in `lengths` vector (lengths[`i`]) represents\nthe number of indices in this batch (batch `i`).\nWith in each batch, `indices` should not have duplicate number.\n\nFor example, with input:\n\n lengths = [2, 3, 1]\n indices = [0, 1, 2, 3, 4, 5]\n values = [6, 7, 8, 9, 10, 11]\n dense_dim = 6\n default_value = 0\n\nThe output is:\n\n output = [[6, 7, 0, 0, 0, 0],\n [0, 0, 8, 9, 10, 0],\n [0, 0, 0, 0, 0, 11]]\n\nafter running this operator.\n",
+ "attributes": [
+ {
+ "description": "Optional, output dense last dimension. If both this argument and output_shape_inference are set, it should be consistent with output_shape_inference's last dim",
+ "name": "dense_last_dim",
+ "option": "optional"
+ },
+ {
+ "description": "Optional, missing values are filled with this value.default_value = 0 when not set",
+ "name": "default_value",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Flatten tensor, used to break down indices and values into per batch indices and values.",
+ "name": "lengths"
+ },
+ {
+ "description": "Flatten tensor of total size = \\sum lengths, containing the indices ",
+ "name": "indices"
+ },
+ {
+ "description": "Data tensor, dimension has to match `indices`",
+ "name": "values"
+ },
+ {
+ "description": "Optional, a dense tensor whose shape define the output shape",
+ "name": "output_shape_inference"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "2-D dense tensor, with 1st dim = len(lengths), 2nd dim = dense_last_dimin the arg list, the tensor is of the same data type as `values`.Missing values are filled with default_value",
+ "name": "dense"
+ }
+ ],
+ "support_level": "default"
+ },
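A minimal NumPy sketch of the scatter described above (hypothetical helper); it reproduces the worked example:

```python
import numpy as np

def batch_sparse_to_dense(lengths, indices, values, dense_last_dim, default_value=0):
    out = np.full((len(lengths), dense_last_dim), default_value, dtype=values.dtype)
    offset = 0
    for i, n in enumerate(lengths):
        # scatter this batch's values into row i at its indices
        out[i, indices[offset:offset + n]] = values[offset:offset + n]
        offset += n
    return out

print(batch_sparse_to_dense([2, 3, 1],
                            np.array([0, 1, 2, 3, 4, 5]),
                            np.array([6, 7, 8, 9, 10, 11]), 6))
# [[ 6  7  0  0  0  0]
#  [ 0  0  8  9 10  0]
#  [ 0  0  0  0  0 11]]
```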
+ {
+ "name": "BatchToSpace",
+ "description": "\nRearranges (permutes) data from batch into blocks of spatial data, followed by cropping. This is the reverse transformation of `SpaceToBatch`. More specifically, this op outputs a copy of the input tensor where values from the batch dimension are moved in spatial blocks to the height and width dimensions, followed by cropping along the height and width dimensions. Only \"NCHW\" order is currently supported.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/space_batch_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BatchToSpace\",\n [\"X\"],\n [\"Y\"],\n pad=3\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(10,3,32,32).astype(np.float32))\nprint(\"X.shape:\", workspace.FetchBlob(\"X\").shape)\nworkspace.RunOperatorOnce(op)\nprint(\"Y.shape:\", workspace.FetchBlob(\"Y\").shape)\n\n```\n\n**Result**\n\n```\n\nX.shape: (10, 3, 32, 32)\nY.shape: (2, 3, 58, 58)\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "(*int*): exclusive axis that divides the first and second dimension of matrix `A` (default=0)",
+ "name": "pad",
+ "option": "optional"
+ },
+ {
+ "description": "(*int*): height/width of spatial blocks to be moved (default=2)",
+ "name": "block_size",
+ "option": "optional"
+ },
+ {
+ "description": "(*string*): order of dimensions of input and output blobs; only \"NCHW\" order is currently supported (default=\"NCHW\")",
+ "name": "order",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "(*Tensor``*): input tensor (NCHW order)",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(*Tensor``*): output tensor (NCHW order)",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BBoxTransform",
+ "description": "\nTransform proposal bounding boxes to target bounding box using bounding box\n regression deltas.\n",
+ "attributes": [
+ {
+ "description": "vector weights [wx, wy, ww, wh] for the deltas",
+ "name": "weights",
+ "option": "optional"
+ },
+ {
+ "description": "bool (default true), transform the boxes to the scaled image space after applying the bbox deltas.Set to false to match the detectron code, set to true for keypoint models and for backward compatibility",
+ "name": "apply_scale",
+ "option": "optional"
+ },
+ {
+ "description": "bool (default false), Correct bounding box transform coordates, see bbox_transform() in boxes.py Set to true to match the detectron code, set to false for backward compatibility",
+ "name": "correct_transform_coords",
+ "option": "optional"
+ },
+ {
+ "description": "bool (default false). If true, then boxes (rois and deltas) include angle info to handle rotation. The format will be [ctr_x, ctr_y, width, height, angle (in degrees)].",
+ "name": "rotated",
+ "option": "optional"
+ },
+ {
+ "description": "bool (default true). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].",
+ "name": "angle_bound_on",
+ "option": "optional"
+ },
+ {
+ "description": "int (default -90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].",
+ "name": "angle_bound_lo",
+ "option": "optional"
+ },
+ {
+ "description": "int (default 90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].",
+ "name": "angle_bound_hi",
+ "option": "optional"
+ },
+ {
+ "description": "float (default 1.0 degrees). For RRPN, clip almost horizontal boxes within this threshold of tolerance for backward compatibility. Set to negative value for no clipping.",
+ "name": "clip_angle_thresh",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Bounding box proposals in pixel coordinates, Size (M, 4), format [x1, y1, x2, y2], orSize (M, 5), format [batch_index, x1, y1, x2, y2]. If proposals from multiple images in a batch are present, they should be grouped sequentially and in incremental order.For rotated boxes, this would have an additional angle (in degrees) in the format [, ctr_x, ctr_y, w, h, angle].",
+ "name": "rois"
+ },
+ {
+ "description": "bounding box translations and scales,size (M, 4*K), format [dx, dy, dw, dh], K = # classes. For rotated boxes, size (M, 5*K, format [dx, dy, dw, dh, da].",
+ "name": "deltas"
+ },
+ {
+ "description": "Image dimensions, size (batch_size, 3), format [img_height, img_width, img_scale]",
+ "name": "im_info"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Pixel coordinates of the transformed bounding boxes,Size (M, 4*K), format [x1, y1, x2, y2]. For rotated boxes, size (M, 5*K), format [ctr_x, ctr_y, w, h, angle].",
+ "name": "box_out"
+ },
+ {
+ "description": "Tensor of shape (batch_size) with each element denoting the number of RoIs belonging to the corresponding image in batch",
+ "name": "roi_batch_splits"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BernoulliJSD",
+ "description": "\nComputes the Jensen-Shannon divergence (JSD) between two Bernoulli distributions\nwhere each is parametrized by a single probability.\n",
+ "inputs": [
+ {
+ "description": "array of probabilities for target",
+ "name": "T"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "array of JSD losses",
+ "name": "L"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BernoulliJSDGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "BisectPercentile",
+ "description": "\n This operator is to map raw feature values into the percentile\n representations based on Bisection for more than one feature.\n\n The input is the bath of input feature values, with the size of (batch_size,\n num_feature), where num_feature = F (F >= 1).\n\n For each feature, we also need additional information regarding the feature\n value distribution.\n There are several vectors to keep data to percentile mappping information\n as arguments (context):\n 1. feature raw values (R)\n 2. feature percentile mapping (P)\n 3. feature percentile lower bound (L)\n 4. feature percentile upper bound (U)\n\n A toy example:\n Suppose the sampled data distribution is as follows:\n 1, 1, 2, 2, 2, 2, 2, 2, 3, 4\n We have the mapping vectors as follows:\n R = [1, 2, 3, 4]\n P = [0.15, 0.55, 0.9, 1.0]\n L = [0.1, 0.3, 0.9, 1.0]\n U = [0.2, 0.8, 0.9, 1.0]\n Where P is computed as (L + U) / 2.\n\n For a given list of feature values, X = [x_0, x_1, ..., x_i, ...], for each\n feature value (x_i) we first apply bisection to find the right index (t),\n such that R[t] <= x_i < R[t+1].\n If x_i = R[t], P[t] is returned;\n otherwise, the interpolation is apply by (R[t], R[t+1]) and (U[t] and L[t]).\n\n As there are F features (F >= 1), we concate all the R_f, P_f, L_f, and\n U_f for each feature f and use an additional input length to keep track of\n the number of points for each set of raw feature value to percentile mapping.\n For example, there are two features:\n R_1 =[0.1, 0.4, 0.5];\n R_2 = [0.3, 1.2];\n We will build R = [0.1, 0.4, 0.5, 0.3, 1.2]; besides, we have\n lengths = [3, 2]\n to indicate the boundaries of the percentile information.\n\n",
+ "attributes": [
+ {
+ "description": "1D tensor, which is the concatenation of all sorted raw feature values for all features.",
+ "name": "percentile_raw",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor. There is one-one mapping between percentile_mapping and percentile_raw such that each element in percentile_mapping corresponds to the percentile value of the corresponding raw feature value.",
+ "name": "percentile_mapping",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor. There is one-one mapping between percentile_upper and percentile_raw such that each element in percentile_mapping corresponds to the percentile lower bound of the corresponding raw feature value.",
+ "name": "percentile_lower",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor. There is one-one mapping between percentile_upper and percentile_raw such that each element in percentile_mapping corresponds to the percentile upper bound of the corresponding raw feature value.",
+ "name": "percentile_upper",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor. There is one-one mapping between percentile_upper and percentile_raw such that each element in percentile_mapping corresponds to the percentile upper bound of the corresponding raw feature value.",
+ "name": "lengths",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input 2D tensor of floats of size (N, D), where N is the batch size and D is the feature dimension.",
+ "name": "raw_values"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "2D tensor of output with the same dimensions as the input raw_values.",
+ "name": "percentile"
+ }
+ ],
+ "support_level": "default"
+ },
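+ The toy example above can be made concrete with a small numpy sketch of one plausible reading of the bisection-plus-interpolation rule (single feature only; the boundary clamping and the choice of interpolating from U[t] toward L[t+1] are assumptions, not guaranteed to match the kernel):
+ ```
+ import numpy as np
+
+ R = np.array([1.0, 2.0, 3.0, 4.0])    # sorted raw feature values
+ P = np.array([0.15, 0.55, 0.9, 1.0])  # percentile = (L + U) / 2
+ L = np.array([0.1, 0.3, 0.9, 1.0])    # percentile lower bounds
+ U = np.array([0.2, 0.8, 0.9, 1.0])    # percentile upper bounds
+
+ def to_percentile(x):
+     # Clamping outside the observed range is an assumption of this sketch.
+     if x <= R[0]:
+         return P[0]
+     if x >= R[-1]:
+         return P[-1]
+     # Bisection: find t such that R[t] <= x < R[t+1].
+     t = np.searchsorted(R, x, side="right") - 1
+     if x == R[t]:
+         return P[t]
+     # Interpolate between the upper bound at t and the lower bound at t+1.
+     frac = (x - R[t]) / (R[t + 1] - R[t])
+     return U[t] + frac * (L[t + 1] - U[t])
+
+ print([to_percentile(v) for v in [1.0, 1.5, 2.0, 3.5]])  # [0.15, 0.25, 0.55, 0.95]
+ ```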
+ {
+ "name": "BitwiseAnd",
+ "description": "\nPerforms element-wise bitwise operation `bitwise_and` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor)* First operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor)* Output tensor. Has same dimensions as input `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
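+ A minimal usage sketch of the suffix-matching broadcast described above, with `bool` operands as the schema text states (shapes and values are illustrative; whether other integral dtypes are accepted is not asserted here):
+ ```
+ import numpy as np
+ from caffe2.python import core, workspace
+
+ # Suffix-matching broadcast: B of shape (2,) against A of shape (3, 2).
+ op = core.CreateOperator("BitwiseAnd", ["A", "B"], ["C"], broadcast=1)
+ workspace.FeedBlob("A", np.array([[True, True], [True, False], [False, True]]))
+ workspace.FeedBlob("B", np.array([True, False]))
+ workspace.RunOperatorOnce(op)
+ print(workspace.FetchBlob("C"))  # elementwise A & B, B broadcast over rows
+ ```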
+ {
+ "name": "BitwiseOr",
+ "description": "\nPerforms element-wise bitwise operation `bitwise_or` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor)* First operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor)* Output tensor. Has same dimensions as input `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BitwiseXor",
+ "description": "\nPerforms element-wise bitwise operation `bitwise_xor` (with limited broadcast support).\nBoth input operands should be of type `bool`.\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor)* First operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor)* Output tensor. Has same dimensions as input `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BooleanMask",
+ "description": "\nGiven a 1D `data` tensor and a boolean `mask` tensor of the same shape, returns a `masked_data` tensor containing only the elements corresponding to positions where the `mask` is True, and a `masked_indices` tensor containing the indices of the True elements.\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_mask_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanMask\",\n [\"data\", \"mask\"],\n [\"masked_data\", \"masked_indices\"]\n)\n\nworkspace.FeedBlob(\"data\", np.array([1,2,3,4,5,6]))\nworkspace.FeedBlob(\"mask\", np.array([True,False,False,True,True,False]))\nprint(\"data:\", workspace.FetchBlob(\"data\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\nworkspace.RunOperatorOnce(op)\nprint(\"masked_data:\", workspace.FetchBlob(\"masked_data\"))\nprint(\"masked_indices:\", workspace.FetchBlob(\"masked_indices\"))\n\n```\n\n**Result**\n\n```\n\ndata: [1 2 3 4 5 6]\nmask: [ True False False True True False]\nmasked_data: [1 4 5]\nmasked_indices: [0 3 4]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "(*Tensor*): 1D input tensor",
+ "name": "data"
+ },
+ {
+ "description": "(*Tensor``*): tensor of bools which determines the input elements that will be left in the `masked_data` output tensor; same shape as `data`",
+ "name": "mask"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(*Tensor*): 1D tensor of same type as `data` input that contains the masked input tensor",
+ "name": "masked_data"
+ },
+ {
+ "description": "(*Tensor``*): 1D tensor of indices of the True elements in the `mask` tensor",
+ "name": "masked_indices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BooleanMaskGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "BooleanMaskLengths",
+ "description": "\nGiven a tensor of int32 `lengths` tensor representing segment lengths and a `mask` (boolean) tensor, return the segment lengths of the corresponding segmented tensor after **BooleanMask** is applied.\n\nIf `lengths` tensor is $[a_1, a_2, ..., a_n]$, then length of `mask` tensor must be $a_1 + a_2 + ... + a_n$.\n\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_mask_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanMaskLengths\",\n [\"lengths\", \"mask\"],\n [\"masked_lengths\"]\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([1,3,2], dtype=np.int32))\nworkspace.FeedBlob(\"mask\", np.array([False,True,True,False,True,True]))\nprint(\"lengths:\", workspace.FetchBlob(\"lengths\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\nworkspace.RunOperatorOnce(op)\nprint(\"masked_lengths:\", workspace.FetchBlob(\"masked_lengths\"))\n\n```\n\n**Result**\n\n```\n\nlengths: [1 3 2]\nmask: [False True True False True True]\nmasked_lengths: [0 2 2]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "(*Tensor``*): input tensor containing segment lengths",
+ "name": "lengths"
+ },
+ {
+ "description": "(*Tensor``*): A 1D bool tensor of values to keep.",
+ "name": "mask"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(*Tensor``*): 1D tensor of same type as inputs that contains the sequence",
+ "name": "masked_lengths"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BooleanUnmask",
+ "description": "\nGiven a series of masks and values, reconstruct values together according to masks. A comprehensive example:\n```\nmask1 = True, False, True, False, False\nvalues1 = 1.0, 3.0\nmask2 = False, True, False, False, False\nvalues2 = 2.0\nmask3 = False, False, False, True, True\nvalues3 = 4.0, 5.0\n```\n\nReconstruct by:\n\n```\noutput = net.BooleanUnmask([mask1, values1, mask2, values2, mask3, values3], [\"output\"])\noutput = 1.0, 2.0, 3.0, 4.0, 5.0\n```\n\nNote that for all mask positions, there must be at least one True. This is not allowed:\n\n```\nmask1 = True, False\nvalues1 = 1.0\nmask2 = False, False\nvalues2 =\n\noutput = net.BooleanUnmask([mask1, values1, mask2, values2], [\"output\"])\n```\n\nIf there are multiple True values for a field, we accept the first value, and no longer expect a value for that location:\n\n```\nmask1 = True, False\nvalues1 = 1.0\nmask2 = True, True\nvalues2 = 2.0\n\noutput = net.BooleanUnmask([mask1, values1, mask2, values2], [\"output\"])\noutput = 1.0, 2.0\n```\n\n*** Note that we alternate `data` and `mask` inputs\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/boolean_unmask_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"BooleanUnmask\",\n [\"mask1\", \"data1\", \"mask2\", \"data2\"],\n [\"unmasked_data\"]\n)\n\nworkspace.FeedBlob(\"mask1\", np.array([True,False,False,True,True,False]))\nworkspace.FeedBlob(\"data1\", np.array([1,4,5]))\nworkspace.FeedBlob(\"mask2\", np.array([False,True,True,False,False,True]))\nworkspace.FeedBlob(\"data2\", np.array([2,3,6]))\n\nprint(\"data1:\", workspace.FetchBlob(\"data1\"))\nprint(\"mask1:\", workspace.FetchBlob(\"mask1\"))\nprint(\"data2:\", workspace.FetchBlob(\"data2\"))\nprint(\"mask2:\", workspace.FetchBlob(\"mask2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"unmasked_data:\", workspace.FetchBlob(\"unmasked_data\"))\n\n```\n\n**Result**\n\n```\n\ndata1: [1 4 5]\nmask1: [ True False False True True False]\ndata2: [2 3 6]\nmask2: [False True True False False True]\nunmasked_data: [1 2 3 4 5 6]\n\n```\n\n \n",
+ "inputs": [
+ {
+ "description": "(*Tensor*): 1D input tensor(s)",
+ "name": "data"
+ },
+ {
+ "description": "(*Tensor``*): 1D boolean mask tensor(s)",
+ "name": "mask"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(*Tensor*): 1D tensor of same type as `data` input that contains the unmasked input tensor",
+ "name": "unmasked_data"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "BoxWithNMSLimit",
+ "description": "\nApply NMS to each class (except background) and limit the number of\nreturned boxes.\n",
+ "attributes": [
+ {
+ "description": "(float) TEST.SCORE_THRESH",
+ "name": "score_thresh",
+ "option": "optional"
+ },
+ {
+ "description": "(float) TEST.NMS",
+ "name": "nms",
+ "option": "optional"
+ },
+ {
+ "description": "(int) TEST.DEECTIONS_PER_IM",
+ "name": "detections_per_im",
+ "option": "optional"
+ },
+ {
+ "description": "(bool) TEST.SOFT_NMS.ENABLED",
+ "name": "soft_nms_enabled",
+ "option": "optional"
+ },
+ {
+ "description": "(string) TEST.SOFT_NMS.METHOD",
+ "name": "soft_nms_method",
+ "option": "optional"
+ },
+ {
+ "description": "(float) TEST.SOFT_NMS.SIGMA",
+ "name": "soft_nms_sigma",
+ "option": "optional"
+ },
+ {
+ "description": "(float) Lower bound on updated scores to discard boxes",
+ "name": "soft_nms_min_score_thres",
+ "option": "optional"
+ },
+ {
+ "description": "bool (default false). If true, then boxes (rois and deltas) include angle info to handle rotation. The format will be [ctr_x, ctr_y, width, height, angle (in degrees)].",
+ "name": "rotated",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Scores, size (count, num_classes)",
+ "name": "scores"
+ },
+ {
+ "description": "Bounding box for each class, size (count, num_classes * 4). For rotated boxes, this would have an additional angle (in degrees) in the format [, ctr_x, ctr_y, w, h, angle]. Size: (count, num_classes * 5).",
+ "name": "boxes"
+ },
+ {
+ "description": "Tensor of shape (batch_size) with each element denoting the number of RoIs/boxes belonging to the corresponding image in batch. Sum should add up to total count of scores/boxes.",
+ "name": "batch_splits"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Filtered scores, size (n)",
+ "name": "scores"
+ },
+ {
+ "description": "Filtered boxes, size (n, 4). For rotated boxes, size (n, 5), format [ctr_x, ctr_y, w, h, angle].",
+ "name": "boxes"
+ },
+ {
+ "description": "Class id for each filtered score/box, size (n)",
+ "name": "classes"
+ },
+ {
+ "description": "Output batch splits for scores/boxes after applying NMS",
+ "name": "batch_splits"
+ },
+ {
+ "description": "Optional filtered indices, size (n)",
+ "name": "keeps"
+ },
+ {
+ "description": "Optional number of filtered indices per class, size (num_classes)",
+ "name": "keeps_size"
+ }
+ ],
+ "support_level": "default"
+ },
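+ A construction-only sketch showing how the TEST.* attributes above map onto the operator; the threshold values are illustrative, and the optional `batch_splits` input and extra outputs are omitted:
+ ```
+ from caffe2.python import core
+
+ # scores: (count, num_classes), boxes: (count, num_classes * 4)
+ op = core.CreateOperator(
+     "BoxWithNMSLimit",
+     ["scores", "boxes"],
+     ["scores_out", "boxes_out", "classes_out"],
+     score_thresh=0.05,      # TEST.SCORE_THRESH
+     nms=0.5,                # TEST.NMS (IoU threshold)
+     detections_per_im=100,  # TEST.DETECTIONS_PER_IM
+ )
+ ```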
+ {
+ "name": "BRGNCHWCToPackedInt8BGRAStylizerDeprocess",
+ "support_level": "default"
+ },
+ {
+ "name": "Broadcast",
+ "description": "\nDoes a broadcast operation from the root node to every other node. The tensor\non each node should have been pre-created with the same shape and data type.\n",
+ "attributes": [
+ {
+ "description": "(int, default 0) the root to run broadcast from.",
+ "name": "root",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The common world.",
+ "name": "comm_world"
+ },
+ {
+ "description": "A tensor to be broadcasted.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "In-place as input 1.",
+ "name": "X"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Bucketize",
+ "description": "\nThis operator works as bucketize in tensorflow and digitize\nin numpy. It bucketizes the input 'X' based on argument 'boundaries'.\nFor each value x in input 'data', the operator returns index i given\nboundaries[i-1] < x <= boundaries[i].\nIf values in 'data' are beyond the bounds of boundaries, 0 or\nlen(boundaries) is returned as appropriate.\nThe boundaries need to be monotonically increasing.\nFor example\n\nIf data = [2, 4, 1] and boundaries = [0.1, 2.5], then\n\noutput = [1, 2, 1]\n\nIf data = [[2, 3], [4, 1], [2, 5]] and boundaries = [0.1, 2.5], then\n\noutput = [[1, 2], [2, 1], [1, 2]]\n\n",
+ "attributes": [
+ {
+ "description": "bucketization boundaries",
+ "name": "boundaries",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "input tensor",
+ "name": "data"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "indices of bins given by boundaries to which each valuein data belongs",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
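+ The documented example, run through the Python front end (a sketch; the float32 dtype choice is an assumption):
+ ```
+ import numpy as np
+ from caffe2.python import core, workspace
+
+ op = core.CreateOperator("Bucketize", ["data"], ["output"], boundaries=[0.1, 2.5])
+ workspace.FeedBlob("data", np.array([2.0, 4.0, 1.0], dtype=np.float32))
+ workspace.RunOperatorOnce(op)
+ print(workspace.FetchBlob("output"))  # [1 2 1], matching the description
+ ```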
+ {
+ "name": "ByteWeightDequant",
+ "support_level": "default"
+ },
+ {
+ "name": "Cast",
+ "description": "\nCasts the elements of a given input tensor to a data type specified by the `to`\nargument and returns an output tensor of the same size in the converted type.\nThe `to` argument must be one of the data types specified in the *DataType*\nenum field in the TensorProto message (see below). If the `to` argument is not\nprovided or is not one of the enumerated types in *DataType*, Caffe2 throws an\nEnforce error.\n\nNOTE: Casting from strings is not supported, and casting to strings is only\nsupported on CPU.\n\nTensorProto *DataType* field:\n```\nmessage TensorProto {\n ...\n enum DataType {\n UNDEFINED = 0;\n FLOAT = 1; // float\n INT32 = 2; // int\n BYTE = 3; // BYTE, when deserialized, is going to be restored as uint8.\n STRING = 4; // string\n BOOL = 5; // bool\n UINT8 = 6; // uint8_t\n INT8 = 7; // int8_t\n UINT16 = 8; // uint16_t\n INT16 = 9; // int16_t\n INT64 = 10; // int64_t\n FLOAT16 = 12; // at::Half\n DOUBLE = 13; // double\n }\n```\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cast_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cast\",\n [\"X\"],\n [\"Y\"],\n to=2\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32)*10)\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[9.436466 5.8529844 0.54932857]\n [1.1583444 2.9936118 0.22950427]\n [3.9143739 3.4040766 8.905341 ]]\nY: [[9 5 0]\n [1 2 0]\n [3 3 8]]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "Data type to which the elements of the input tensor are cast. Strictly must be one of the types from *DataType* enum in TensorProto.",
+ "name": "to",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor)* Input tensor to be cast.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor`<'to' type>`)* Output tensor with the same shape as input with type specified by the `to` argument.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Cbrt",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor calculated as the cbrt of the input tensor, element-wise.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CbrtGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Ceil",
+ "description": "\nElement-wise application of the ceil function ($y=ceil(x)$) to the input tensor\n`X`. Output tensor shape is the same as the input tensor.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/ceil_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Ceil\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.uniform(-10, 10, (5,5))).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[ 8.44598 -6.5098248 -2.2993476 -7.6859694 0.58566964]\n [-7.846551 -0.03689406 6.9362907 -4.0521703 4.4969673 ]\n [ 0.33355865 -7.895527 -8.393201 9.374202 -2.3930092 ]\n [-6.3061996 3.1403487 3.782099 -8.516556 -2.8387244 ]\n [-2.0164998 4.7663913 -3.422966 0.3636999 8.75713 ]]\nX after running op:\n[[ 9. -6. -2. -7. 1.]\n [-7. -0. 7. -4. 5.]\n [ 1. -7. -8. 10. -2.]\n [-6. 4. 4. -8. -2.]\n [-2. 5. -3. 1. 9.]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ChannelBackpropStats",
+ "description": "\nGiven an input tensor in NCHW format, the gradient for the output of SpatialBN\nand the per-channel mean and inverse std var vectors for the input, computes the\nper-channel bias and scale gradient to be used during the backward pass for\nsubsequent spatial batch normalization gradient calculation. Typically, the\nresults of this op are subsequently reduced over multiple devices to obtain\nstatistics over a larger batch size in cases where the batch size for a single\nmodel copy is too low to yield the full benefit of batch normalization. The\nresulting bias and scale can then be plugged back into SpatialBNGradient to get\nresults over the larger batch size ",
+ "inputs": [
+ {
+ "description": "The input 4-dimensional tensor of shape NCHW",
+ "name": "X"
+ },
+ {
+ "description": "The mean saved from the forward pass as a 1-dimensional tensor of size C.",
+ "name": "mean"
+ },
+ {
+ "description": "The saved inverse standard deviation as a 1-dimensional tensor of size C.",
+ "name": "inv_std"
+ },
+ {
+ "description": "Gradient for the output layer of SpatialBN, here used as input because we are on the backward pass",
+ "name": "output_grad"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Gradient for the scale vector",
+ "name": "scale_grad"
+ },
+ {
+ "description": "Gradient for the bias vector",
+ "name": "bias_grad"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ChannelShuffle",
+ "support_level": "default"
+ },
+ {
+ "name": "ChannelShuffleGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "ChannelStats",
+ "description": "\nGiven an input tensor in NCHW format, computes the sum of all elements per\nchannel and the sum of all elements squared per channel. These values can be\nreduced across multiple batches and used to obtain the mean and variance across\nthe full set of batches. Using the new mean and variance as input to SpatialBN\nhas the effect of changing the batch size over which SpatialBN is applied.\n",
+ "inputs": [
+ {
+ "description": "The input 4-dimensional tensor of shape NCHW",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The output 1-dimensional tensor of size C containing the sum of elements of X per channel.",
+ "name": "sum"
+ },
+ {
+ "description": "The output 1-dimensional tensor of size C containing the sum of elements squared per channel.",
+ "name": "sumsq"
+ }
+ ],
+ "support_level": "default"
+ },
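+ To illustrate the intended use of `sum` and `sumsq`, a numpy sketch recovering the per-channel mean and variance that would then feed SpatialBN (the plain numpy reduction stands in for running the op itself):
+ ```
+ import numpy as np
+
+ X = np.random.randn(2, 3, 4, 4).astype(np.float32)  # NCHW
+ sum_c = X.sum(axis=(0, 2, 3))            # what the "sum" output holds
+ sumsq_c = (X ** 2).sum(axis=(0, 2, 3))   # what the "sumsq" output holds
+ count = X.shape[0] * X.shape[2] * X.shape[3]  # N*H*W elements per channel
+ mean = sum_c / count
+ var = sumsq_c / count - mean ** 2        # E[x^2] - E[x]^2, per channel
+ ```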
+ {
+ "name": "CheckAtomicBool",
+ "description": "Copy the value of an atomic to a bool",
+ "inputs": [
+ {
+ "description": "Blob containing a unique_ptr>",
+ "name": "atomic_bool"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Copy of the value for the atomic",
+ "name": "value"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CheckCounterDone",
+ "description": "\nIf the internal count value <= 0, outputs true, otherwise outputs false.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n// Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n// Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n// Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n// Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n// Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.",
+ "name": "counter"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: bool)* True if the internal count is zero or negative, otherwise False.",
+ "name": "done"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CheckDatasetConsistency",
+ "description": "\nChecks that the given data fields represents a consistent dataset under\nthe schema specified by the `fields` argument. Operator fails if the fields\nare not consistent. If data is consistent, each field's data can be safely\nappended to an existing dataset, keeping it consistent.\n",
+ "attributes": [
+ {
+ "description": "List of strings representing the string names in the formatspecified in the doc for CreateTreeCursor.",
+ "name": "fields",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Data for field 0.",
+ "name": "field_0"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Checkpoint",
+ "description": "\nThe Checkpoint operator is similar to the Save operator, but allows one to save\nto db every few iterations, with a db name that is appended with the iteration\ncount. It takes [1, infinity) number of inputs and has no output. The first\ninput has to be a TensorCPU of type int and has size 1 (i.e. the iteration\ncounter). This is determined whether we need to do checkpointing.\n",
+ "attributes": [
+ {
+ "description": "(int, default 0) if set, use the db path directly and do not prepend the current root folder of the workspace.",
+ "name": "absolute_path",
+ "option": "optional"
+ },
+ {
+ "description": "(string) a template string that one can combine with the iteration to create the final db name. For example, \"/home/lonestarr/checkpoint_%08d.db\"",
+ "name": "db",
+ "option": "optional"
+ },
+ {
+ "description": "(string) the type of the db.",
+ "name": "db_type",
+ "option": "optional"
+ },
+ {
+ "description": "(int, default 1) the checkpointing is carried out when (iter mod every) is zero.",
+ "name": "every",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
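+ A construction-only sketch of the attributes above (the path and blob names are hypothetical; the iteration counter must come first):
+ ```
+ from caffe2.python import core
+
+ op = core.CreateOperator(
+     "Checkpoint",
+     ["iter", "weights", "bias"],   # "iter": int TensorCPU of size 1
+     [],
+     db="/tmp/checkpoint_%08d.db",  # template combined with the iteration
+     db_type="leveldb",
+     every=100,                     # checkpoint when iter % every == 0
+ )
+ ```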
+ {
+ "name": "Clip",
+ "description": "\nThis operator limits the given input within an interval. The interval is\nspecified by the `min` and `max` arguments. They default to\n*numeric_limits::lowest()* and *numeric_limits::max()* respectively. The\nclipping operation can be done in an in-place fashion by using the same output\nblob as the input blob.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/clip_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Clip\",\n [\"X\"],\n [\"Y\"],\n min=20.0,\n max=60.0\n\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(100, size=(5,5))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\nX: [[45. 16. 59. 99. 48.]\n [12. 44. 46. 82. 28.]\n [ 1. 91. 18. 9. 71.]\n [24. 37. 61. 12. 81.]\n [36. 38. 30. 84. 40.]]\nY: [[45. 20. 59. 60. 48.]\n [20. 44. 46. 60. 28.]\n [20. 60. 20. 20. 60.]\n [24. 37. 60. 20. 60.]\n [36. 38. 30. 60. 40.]]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "Minimum value, under which element is replaced by min (default=*numeric_limits::lowest()*).",
+ "name": "min",
+ "option": "optional",
+ "type": "float32"
+ },
+ {
+ "description": "Maximum value, under which element is replaced by max (default=*numeric_limits::max()*).",
+ "name": "max",
+ "option": "optional",
+ "type": "float32"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(Tensor``)* Input tensor within range [*numeric_limits::lowest()*, *numeric_limits::max()*].",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(Tensor``)* Output tensor clipped within range [`min`, `max`].",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ClipGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "ClipTensorByScaling",
+ "description": "\n Clips the input tensor by scaling based on the input value and the threshold.\n The value is usually the (pre-computed) norm of the tensor. If the value is\n larger than the threshold, scaling would be performed in this way:\n\n tensor *= (threshold / value).\n\n An optional input called additional_threshold can be provided which\n will scale the original threshold before it is used. That is,\n the final threshold will become threshold * additional_threshold.\n This op could be used for gradient clipping.\n",
+ "attributes": [
+ {
+ "description": "Threshold to determine whether to scale down the tensor",
+ "name": "threshold",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Tensor of floats to be clipped.",
+ "name": "input_tensor"
+ },
+ {
+ "description": "Value to be compared against the threshold",
+ "name": "val"
+ },
+ {
+ "description": "An optional additional threshold to scale the original threshold",
+ "name": "additional_threshold"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor of floats, which is the same size as the input tensor, representing the clipped tensor.",
+ "name": "clipped"
+ }
+ ],
+ "support_level": "default"
+ },
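+ The scaling rule above in a few lines of numpy (a sketch of the formula only, not the kernel; defaulting `additional_threshold` to a no-op of 1.0 is an assumption):
+ ```
+ import numpy as np
+
+ def clip_by_scaling(t, value, threshold, additional_threshold=1.0):
+     threshold = threshold * additional_threshold
+     if value > threshold:
+         t = t * (threshold / value)  # tensor *= (threshold / value)
+     return t
+
+ g = np.array([3.0, 4.0])  # e.g. a gradient with L2 norm 5.0
+ print(clip_by_scaling(g, np.linalg.norm(g), threshold=1.0))  # [0.6 0.8]
+ ```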
+ {
+ "name": "CloneCommonWorld",
+ "description": "\nClones existing common world.\n",
+ "inputs": [
+ {
+ "description": "Existing common world to clone.",
+ "name": "existing_comm_world"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "A common world for collective operations.",
+ "name": "comm_world"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CloseBlobsQueue",
+ "support_level": "default"
+ },
+ {
+ "name": "CloseRebatchingQueue",
+ "description": "\nCloses the Queue.\n",
+ "inputs": [
+ {
+ "description": "object representing the queue",
+ "name": "queue"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Col2Im",
+ "support_level": "default"
+ },
+ {
+ "name": "CollectAndDistributeFpnRpnProposals",
+ "description": "\nMerge RPN proposals generated at multiple FPN levels and then\ndistribute those proposals to their appropriate FPN levels for Faster RCNN.\nAn anchor at one FPN level may predict an RoI that will map to another level,\nhence the need to redistribute the proposals.\n\nOnly inference is supported. To train, please use the original Python\noperator in Detectron.\n\nInputs and outputs are examples only; if min/max levels change,\nthe number of inputs and outputs, as well as their level numbering,\nwill change.\n",
+ "attributes": [
+ {
+ "description": "(int) ROI_CANONICAL_SCALE",
+ "name": "roi_canonical_scale",
+ "option": "optional"
+ },
+ {
+ "description": "(int) ROI_CANONICAL_LEVEL",
+ "name": "roi_canonical_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) ROI_MAX_LEVEL",
+ "name": "roi_max_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) ROI_MIN_LEVEL",
+ "name": "roi_min_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) RPN_MAX_LEVEL",
+ "name": "rpn_max_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) RPN_MIN_LEVEL",
+ "name": "rpn_min_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) RPN_POST_NMS_TOP_N",
+ "name": "rpn_post_nms_topN",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "RPN proposals for FPN level 2, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn2"
+ },
+ {
+ "description": "RPN proposals for FPN level 3, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn3"
+ },
+ {
+ "description": "RPN proposals for FPN level 4, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn4"
+ },
+ {
+ "description": "RPN proposals for FPN level 5, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn5"
+ },
+ {
+ "description": "RPN proposals for FPN level 6, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn6"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 2. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn2"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 3. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn3"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 4. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn4"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 5. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn5"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 6. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn6"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)",
+ "name": "rois"
+ },
+ {
+ "description": "RPN proposals for ROI level 2, format (image_index, x1, y1, x2, y2)",
+ "name": "rois_fpn2"
+ },
+ {
+ "description": "RPN proposals for ROI level 3, format (image_index, x1, y1, x2, y2)",
+ "name": "rois_fpn3"
+ },
+ {
+ "description": "RPN proposals for ROI level 4, format (image_index, x1, y1, x2, y2)",
+ "name": "rois_fpn4"
+ },
+ {
+ "description": "RPN proposals for ROI level 5, format (image_index, x1, y1, x2, y2)",
+ "name": "rois_fpn5"
+ },
+ {
+ "description": "Permutation on the concatenation of all rois_fpni, i=min...max, such that when applied the RPN RoIs are restored to their original order in the input blobs.",
+ "name": "rois_idx_restore"
+ }
+ ],
+ "support_level": "default"
+ },
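+ The redistribution step relies on the canonical-scale heuristic that the `roi_canonical_*` attributes parameterize; a hedged numpy sketch of that Detectron-style level assignment (the clamping, epsilon, and +1 box-width conventions are assumptions):
+ ```
+ import numpy as np
+
+ def map_rois_to_fpn_levels(rois, k_min=2, k_max=5, s0=224, lvl0=4):
+     # rois: (R, 5) as (image_index, x1, y1, x2, y2)
+     w = rois[:, 3] - rois[:, 1] + 1
+     h = rois[:, 4] - rois[:, 2] + 1
+     s = np.sqrt(w * h)
+     # level = floor(canonical_level + log2(sqrt(area) / canonical_scale))
+     target = np.floor(lvl0 + np.log2(s / s0 + 1e-6))
+     return np.clip(target, k_min, k_max).astype(int)
+ ```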
+ {
+ "name": "CollectRpnProposals",
+ "description": "\n...\n",
+ "attributes": [
+ {
+ "description": "(int) RPN_MAX_LEVEL",
+ "name": "rpn_max_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) RPN_MIN_LEVEL",
+ "name": "rpn_min_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) RPN_POST_NMS_TOP_N",
+ "name": "rpn_post_nms_topN",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "RPN proposals for FPN level 2, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn2"
+ },
+ {
+ "description": "RPN proposals for FPN level 3, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn3"
+ },
+ {
+ "description": "RPN proposals for FPN level 4, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn4"
+ },
+ {
+ "description": "RPN proposals for FPN level 5, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn5"
+ },
+ {
+ "description": "RPN proposals for FPN level 6, format (image_index, x1, y1, x2, y2). See rpn_rois documentation from GenerateProposals.",
+ "name": "rpn_rois_fpn6"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 2. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn2"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 3. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn3"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 4. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn4"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 5. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn5"
+ },
+ {
+ "description": "RPN objectness probabilities for FPN level 6. See rpn_roi_probs documentation from GenerateProposals.",
+ "name": "rpn_roi_probs_fpn6"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)",
+ "name": "rois"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CollectTensor",
+ "description": "\nCollect tensor into tensor vector by reservoir sampling,\nargument num_to_collect indicates the max number of tensors that will be\ncollected. The first half of the inputs are tensor vectors, which are also the\noutputs. The second half of the inputs are the tensors to be collected into each\nvector (in the same order). The input tensors are collected in all-or-none\nmanner. If they are collected, they will be placed at the same index in the\noutput vectors.\n",
+ "attributes": [
+ {
+ "description": "The max number of tensors to collect",
+ "name": "num_to_collect",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
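+ For context, the reservoir-sampling scheme the description names, in its textbook form (the operator applies the same all-or-none keep/replace decision across all of its vectors):
+ ```
+ import random
+
+ def reservoir(stream, num_to_collect):
+     sample = []
+     for i, x in enumerate(stream):
+         if i < num_to_collect:
+             sample.append(x)          # fill the reservoir first
+         else:
+             j = random.randint(0, i)  # uniform over [0, i]
+             if j < num_to_collect:
+                 sample[j] = x         # each item kept with prob k/(i+1)
+     return sample
+ ```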
+ {
+ "name": "ColwiseMax",
+ "description": "\nCompute column-wise max reduction of the input tensor. This op takes one input, $X$, of shape $BxMxN$, where $B$ is the batch size, $M$ is number of rows, and $N$ is number of columns. The output of this op, $Y$, is a matrix of shape $BxN$, with one row for each element of the batch, and the same number of columns as the input tensor.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/reduction_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ColwiseMax\",\n [\"X\"],\n [\"Y\"]\n)\n\n// Create X, simulating a batch of 2, 4x4 matricies\nX = np.random.randint(0,high=20,size=(2,4,4))\nprint(\"X:\\n\",X)\n\n// Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[[17 15 2 6]\n [ 8 12 6 0]\n [ 6 9 7 3]\n [ 4 13 16 13]]\n\n [[ 0 3 4 12]\n [18 1 17 12]\n [ 7 17 13 14]\n [12 17 2 1]]]\nY:\n [[17. 15. 16. 13.]\n [18. 17. 17. 14.]]\n\n```\n\n \n\n ",
+ "inputs": [
+ {
+ "description": "A tensor of dimensions $B x M x N$ to compute columnwise-max. Here, $B$ is batch size, and $M$ and $N$ are the number of rows and columns of each element of the batch, respectively.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The output tensor of shape $B x N$, where each row represents the column-wise maximums for that element of the input batch.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ColwiseMaxGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "ComputeOffset",
+ "description": "\nCompute the offsets matrix given cursor and data blobs. Need to be ran at\nbeginning or after reseting cursor\n\nInput(0) is a blob pointing to a TreeCursor, and\n[Input(1),... Input(num_fields)] a list of tensors containing the data for\neach field of the dataset.\n\nComputeOffset is thread safe.\n",
+ "inputs": [
+ {
+ "description": "A blob containing a pointer to the cursor.",
+ "name": "cursor"
+ },
+ {
+ "description": "First dataset field",
+ "name": "dataset_field_0"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor containing offset info for this chunk.",
+ "name": "field_0"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Concat",
+ "category": "Tensor",
+ "description": "\nConcatenate a list of tensors into a single tensor. Similar functionality to\nNumpy's [concatenate](https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html)\nfunction. The `axis` argument specifies what axis along which the arrays will be concatenated.\nWhen set to non-zero (default=0), the `add_axis` argument adds the axis specified in `axis` to\nall input tensors.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/concat_split_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/concat_split_op.h\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Concat\",\n [\"X1\", \"X2\"],\n [\"Y\", \"split_info\"],\n axis=0\n)\n\nworkspace.FeedBlob(\"X1\", np.array([[1,2],[3,4]]))\nworkspace.FeedBlob(\"X2\", np.array([[5,6]]))\nprint(\"X1:\", workspace.FetchBlob(\"X1\"))\nprint(\"X2:\", workspace.FetchBlob(\"X2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"split_info:\", workspace.FetchBlob(\"split_info\"))\n\n```\n\n**Result**\n\n```\n\nX1: [[1 2]\n [3 4]]\nX2: [[5 6]]\nY: [[1 2]\n [3 4]\n [5 6]]\nsplit_info: [2 1]\n\n```\n\n \n\n\n\n Example 2
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Concat\",\n [\"X1\", \"X2\"],\n [\"Y\", \"split_info\"],\n add_axis=1,\n axis=3\n)\n\nworkspace.FeedBlob(\"X1\", np.random.randint(10, size=(1, 1, 5, 5))) // NCHW\nworkspace.FeedBlob(\"X2\", np.random.randint(10, size=(1, 1, 5, 5))) // NCHW\nprint(\"X1:\", workspace.FetchBlob(\"X1\"))\nprint(\"X2:\", workspace.FetchBlob(\"X2\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"split_info:\", workspace.FetchBlob(\"split_info\"))\n\n```\n\n**Result**\n\n```\n\nX1: [[[[1 8 3 9 0]\n [6 4 6 5 6]\n [3 9 1 9 9]\n [5 1 0 7 7]\n [9 4 0 0 9]]]]\nX2: [[[[7 0 2 6 1]\n [3 9 4 0 3]\n [5 3 8 9 4]\n [3 4 2 1 0]\n [0 8 8 8 1]]]]\nY: [[[[[1 8 3 9 0]\n [7 0 2 6 1]]\n\n [[6 4 6 5 6]\n [3 9 4 0 3]]\n\n [[3 9 1 9 9]\n [5 3 8 9 4]]\n\n [[5 1 0 7 7]\n [3 4 2 1 0]]\n\n [[9 4 0 0 9]\n [0 8 8 8 1]]]]]\nsplit_info: [1 1]\n\n```\n\n \n\n ",
+ "attributes": [
+ {
+ "default": -1,
+ "description": "Axis to concatenate on.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "description": "Order of blob dimensions. Concats on the C dimension.",
+ "name": "order",
+ "option": "optional",
+ "type": "string"
+ },
+ {
+ "description": "Pass non-zero integer to add the axis specified in `axis` to all input tensors.",
+ "name": "add_axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "name": "inputs",
+ "option": "variadic"
+ },
+ {
+ "description": "*(type: Tensor``)* List of input tensors.",
+ "name": "X1, X2, ..."
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Concatenated tensor.",
+ "name": "concat_result"
+ },
+ {
+ "description": "*(type: Tensor``)* The dimensions of the inputs.",
+ "name": "split_info"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ConcatBatchMatMulBatchGatherOp",
+ "support_level": "default"
+ },
+ {
+ "name": "ConcatTensorVector",
+ "description": "\nConcat Tensors in the std::unique_ptr >\nalong the first dimension.\n ",
+ "inputs": [
+ {
+ "description": "std::unique_ptr >",
+ "name": "vector of Tensor"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "tensor after concatenating",
+ "name": "tensor"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Conditional",
+ "description": "\nGiven a 1-D tensor of boolean values, apply conditional operator along the first\ndimension of DataT and DataF and return DataO. Note, DataT and DataF must\nhave the exact same shape and type.\n",
+ "inputs": [
+ {
+ "description": "Boolean tensor to select DataT or DataF",
+ "name": "Condition"
+ },
+ {
+ "description": "Data to use when True",
+ "name": "DataT"
+ },
+ {
+ "description": "Data to use when False",
+ "name": "DataF"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data after applying ConditionalOp",
+ "name": "DataO"
+ }
+ ],
+ "support_level": "default"
+ },
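+ A small usage sketch (values illustrative): rows of DataT are taken where Condition is True, rows of DataF otherwise:
+ ```
+ import numpy as np
+ from caffe2.python import core, workspace
+
+ op = core.CreateOperator("Conditional", ["Condition", "DataT", "DataF"], ["DataO"])
+ workspace.FeedBlob("Condition", np.array([True, False, True]))
+ workspace.FeedBlob("DataT", np.array([[1, 1], [2, 2], [3, 3]], dtype=np.float32))
+ workspace.FeedBlob("DataF", np.array([[9, 9], [8, 8], [7, 7]], dtype=np.float32))
+ workspace.RunOperatorOnce(op)
+ print(workspace.FetchBlob("DataO"))  # [[1. 1.] [8. 8.] [3. 3.]]
+ ```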
+ {
+ "name": "ConditionalSetAtomicBool",
+ "description": "\nSet an atomic to true if the given condition bool variable is true\n ",
+ "inputs": [
+ {
+ "description": "Blob containing a unique_ptr>",
+ "name": "atomic_bool"
+ },
+ {
+ "description": "Blob containing a bool",
+ "name": "condition"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ConstantFill",
+ "description": "\nThis operator fills the elements of the output tensor with a constant value\nspecified by the `value` argument.\n\n- The data type is specified by the `dtype` argument\n\n- Currently, the data types supported are *float*, *int32*, *int64*, and *bool*\n\n- If the `dtype` argument is not provided, the data type of `value` is used\n\n- The output tensor shape is either specified by the `shape` argument or will\nmatch the shape of the input tensor if one is provided (if an input tensor is\nprovided, a shape argument should not be set)\n\n- Optional additional dimensions can be appended at the end as specified by\n`extra_shape` argument\n\n- If `input_as_shape` is set to True, the input should be a 1D tensor\ncontaining the desired output shape (the dimensions specified in `extra_shape`\nwill also be appended)\n\n- If a second input V is passed, fill the output with the first element of V\n\nWhen specifying `dtype` argument, use the integer keys from the *DataType* enum\nin TensorProto:\n\n```\nmessage TensorProto {\n ...\n enum DataType {\n UNDEFINED = 0;\n FLOAT = 1; // float\n INT32 = 2; // int\n BYTE = 3; // BYTE, when deserialized, is going to be restored as uint8.\n STRING = 4; // string\n BOOL = 5; // bool\n UINT8 = 6; // uint8_t\n INT8 = 7; // int8_t\n UINT16 = 8; // uint16_t\n INT16 = 9; // int16_t\n INT64 = 10; // int64_t\n FLOAT16 = 12; // at::Half\n DOUBLE = 13; // double\n }\n```\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/filler_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConstantFill\",\n [],\n [\"Y\"],\n shape=(1,5,5)\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nY: [[[0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]\n [0. 0. 0. 0. 0.]]]\n```\n \n\n\n Example 2
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConstantFill\",\n [\"X\"],\n [\"Y\"],\n value=4.0,\n dtype=1,\n extra_shape=(1,2)\n)\n\nworkspace.FeedBlob(\"X\", (np.random.randint(100, size=(3,3))).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[86. 30. 84.]\n [34. 51. 9.]\n [29. 86. 59.]]\nY: [[[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]\n\n\n [[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]\n\n\n [[[4. 4.]]\n\n [[4. 4.]]\n\n [[4. 4.]]]]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "value to populate output tensor with.",
+ "name": "value",
+ "option": "optional"
+ },
+ {
+ "description": "The data type for the elements of the output tensor. Strictly must be one of the types from *DataType* enum in TensorProto.",
+ "name": "dtype",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "description": "Shape of the output tensor. Cannot pass an input blob and this arg at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "Additional dimensions appended at the end of the shape indicated by the input blob. Cannot set thisargument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape. First input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor)* [OPTIONAL] Input tensor to provide shape information.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor)* Output tensor of constant values.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Conv",
+ "category": "Layer",
+ "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n// Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n// Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n// Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n// Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n// Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n \n\n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "pad"
+ },
+ {
+ "default": 1,
+ "name": "stride"
+ },
+ {
+ "name": "exhaustive_search",
+ "type": "boolean",
+ "visible": false
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.",
+ "name": "X"
+ },
+ {
+ "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.",
+ "name": "filter"
+ },
+ {
+ "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Conv1D",
+ "description": "\nThe convolution operator consumes an input vector, a 1D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n \n\n\n",
+ "inputs": [
+ {
+ "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.",
+ "name": "X"
+ },
+ {
+ "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.",
+ "name": "filter"
+ },
+ {
+ "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Conv1DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Conv2D",
+ "description": "\nThe convolution operator consumes an input vector, a 2D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n \n\n\n",
+ "inputs": [
+ {
+ "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.",
+ "name": "X"
+ },
+ {
+ "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.",
+ "name": "filter"
+ },
+ {
+ "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Conv2DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Conv3D",
+ "description": "\nThe convolution operator consumes an input vector, a 3D filter blob\nand a bias blob and computes the output. \nThe Conv2D operator computes a 2D convolution operation over an input blob $(X)$, with a filter blob $(filter)$ and a bias blob $(bias)$, and outputs a single output blob $(Y)$. Although there are several options for order, the convention is that the input $(X)$ is a blob of shape $(N,C_{in},H_{in},W_{in})$ and the output $(Y)$ is a blob of shape $(N,C_{out},H_{out},W_{out})$. Here, $N$ is the batch size, $C$ is the number of channels, $H$ is the spatial height, and $W$ is the spatial width. For example, if your input data was a batch of five, 100x120pixel RGB images, $X$ would have shape $(5,3,120,100)$.\n\nThe $filter$ input blob may contain multiple filters and has shape $(M, C_{in}, K_H, K_W)$. Here, $M$ is the number of individual filters contained in the blob, $C_{in}$ is the number of channels of each filter (by convention in 2D convolution it is the same as the number of channels in the input), $K_H$ is the spatial height of the kernel, and $K_W$ is the spatial width of the kernel. The $bias$ blob is a vector of length $M$, where there is one bias for each filter in the $filter$ blob.\n\nGiven the shape of the input blob and the filter blob, we can calculate the shape of the output blob as follows. The number of items in the batch $N$ will stay the same. The number of channels in the output will equal the number of kernels in the filter blob, so $C_{out} = M.$ With stride and pad defined below, the spatial height and width of the output ($H_{out}$ and $W_{out}$) are calculated as\n\n$$H_{out} = \\left \\lfloor{\\frac{H_{in} - K_H + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\n$$W_{out} = \\left \\lfloor{\\frac{W_{in} - K_W + 2*pad}{stride}+1}\\right \\rfloor$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Conv\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernel=5,\n pad=1,\n stride=2\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(1,1,8,8).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create W: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,5,5).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.,1.,1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (1, 1, 8, 8)\nFilter shape: (3, 1, 5, 5)\nBias shape: (3,)\nY:\n [[[[ 0.6406407 0.8620521 0.56461596]\n [ -1.5042953 -0.79549205 -10.683343 ]\n [ -0.5240259 3.4538248 -3.9564204 ]]\n\n [[ 0.6876496 4.8328524 -1.9525816 ]\n [ 1.2995434 -2.3895378 7.2670045 ]\n [ 3.9929862 1.8126237 5.4699917 ]]\n\n [[ 3.55949 4.7934155 0.76086235]\n [ 3.9588015 -1.3251319 4.413117 ]\n [ -1.5296054 -1.4924102 -3.2552304 ]]]]\n\n```\n\n \n\n\n",
+ "inputs": [
+ {
+ "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be convolved with the kernels in the filter blob.",
+ "name": "X"
+ },
+ {
+ "description": "The filter blob, of shape $(M, C_{in}, K_H, K_W)$, containing the filters to be convolved with the data.",
+ "name": "filter"
+ },
+ {
+ "description": "The bias blob, of length $M$, containing the biases for the convolution, one bias per filter.",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the convolution.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Conv3DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "ConvGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "ConvRelu",
+ "support_level": "default"
+ },
+ {
+ "name": "ConvTranspose",
+ "category": "Layer",
+ "description": "\nThe ConvTranspose op takes an input data tensor $X$, an input weight tensor $filter$, and optionally an input bias tensor $bias$. It then computes the transposed convolution, sometimes referred to as deconvolution, and produces a single output tensor $Y$. The hyperparameters of the op such as kernel size, stride, and padding are specified as args. At each stride, the filter is deconvolved with a subset of $X$ and the $bias$ is added. This is done throughout the input data until the output computation is complete.\n\nThe output shapes are computed as follows. The number of channels in the output feature map is the number of kernels specified in the filter blob. The spatial height and width are computed as:\n\n$$H_{out} = (H_{in}-1)*strides[0] - 2*pads[0] + kernels[0]$$\n\n\n$$W_{out} = (W_{in}-1)*strides[1] - 2*pads[1] + kernels[1]$$\n\nNote on the implementation layout: conv_transpose_op_impl.h is the templated implementation of the conv_transpose_op.h file, which is why they are separate files. Also, in the implementation this operator inherits from the *ConvTransposeUnpoolOpBase* operator.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_op.h\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_op.cc\n- https://github.com/pytorch/pytorch/tree/master/caffe2/operators/conv_transpose_unpool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ConvTranspose\",\n [\"X\", \"filter\", \"bias\"],\n [\"Y\"],\n kernels=[2,2],\n pads=[4,4,4,4],\n strides=[2,2]\n)\n\n# Create X: (N,C,H,W)\ndata = np.random.randn(2,3,5,5).astype(np.float32)\nprint(\"Data shape: \",data.shape)\n\n# Create filter: (M,C,Kh,Kw)\nfilters = np.random.randn(3,1,2,2).astype(np.float32)\nprint(\"Filter shape: \",filters.shape)\n\n# Create b: M\nbias = np.array([1.]).astype(np.float32)\nprint(\"Bias shape: \",bias.shape)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"filter\", filters)\nworkspace.FeedBlob(\"bias\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nData shape: (2, 3, 5, 5)\nFilter shape: (3, 1, 2, 2)\nBias shape: (1,)\nY:\n [[[[0.53606427 0.5775447 ]\n [0.40148795 1.5188271 ]]]\n\n\n [[[1.9903406 3.2794335 ]\n [0.09960175 0.31917763]]]]\n\n```\n\n \n\n ",
+ "attributes": [
+ {
+ "description": "Should the legacy padding be VALID or SAME. When used, pads should not be used.",
+ "name": "legacy_pad",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "description": "Desired kernel size. If left at default the kernel size will be inferred from the input $filter$ blob.",
+ "name": "kernels",
+ "option": "optional",
+ "type": "int64[]"
+ },
+ {
+ "description": "Controls the stride of the kernel as it traverses the input blob.",
+ "name": "strides",
+ "option": "optional",
+ "type": "int64[]"
+ },
+ {
+ "description": "Controls the amount of padding applied to the input feature map before computation.",
+ "name": "pads",
+ "option": "optional",
+ "type": "int64[]"
+ },
+ {
+ "description": "",
+ "name": "adjs",
+ "option": "optional",
+ "type": "int64[]"
+ },
+ {
+ "default": "NCHW",
+ "description": "Specifies the order of the input data blob, where $N$ is batch size, $C$ is number of channels, $H$ is spatial height, and $W$ is spatial width. The only other valid option is \"NHWC\".",
+ "name": "order",
+ "option": "optional",
+ "type": "string"
+ },
+ {
+ "default": 0,
+ "description": "",
+ "name": "shared_buffer",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": false,
+ "description": "",
+ "name": "no_bias",
+ "option": "optional",
+ "type": "boolean"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data blob, of shape $(N, C_{in}, H_{in}, W_{in})$, to be operated on.",
+ "name": "X"
+ },
+ {
+ "description": "The filter blob, of shape $(M, C_{out}, K_H, K_W)$, containing the filters to be used in the transposed convolution.",
+ "name": "filter"
+ },
+ {
+ "description": "The bias blob, of length $C_{out}$, containing the biases for the operation, one bias per output channel. If not passed, biases assumed to be zeros.",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob, of shape $(N, C_{out}, H_{out}, W_{out})$, that contains the result of the operation.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
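The ConvTranspose output-shape formulas above can be checked the same way; a plain-Python sketch (editorial illustration, `conv_transpose_out_dim` is a hypothetical helper) reproduces the 2x2 spatial output of the example:

```
def conv_transpose_out_dim(in_dim, kernel, pad, stride):
    # H_out = (H_in - 1) * strides[0] - 2 * pads[0] + kernels[0]
    return (in_dim - 1) * stride - 2 * pad + kernel

# Numbers from the ConvTranspose example: 5x5 input, kernels=[2,2],
# pads=[4,4,4,4], strides=[2,2]
print(conv_transpose_out_dim(5, 2, 4, 2))  # -> 2, matching the 2x2 output
```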
+ {
+ "name": "ConvTransposeGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Copy",
+ "description": "\nCopy input tensor into output, potentially across devices.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/copy_op.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/copy_op.h\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Copy\",\n [\"input\"],\n [\"output\"]\n)\n\nworkspace.FeedBlob(\"input\", np.random.rand(3,3))\nprint(\"input:\", workspace.FetchBlob(\"input\"))\nworkspace.RunOperatorOnce(op)\nprint(\"output:\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n[[0.16826761 0.68168217 0.55196001]\n [0.19735483 0.34837823 0.69015595]\n [0.09448514 0.57390828 0.37097193]]\noutput:\n[[0.16826761 0.68168217 0.55196001]\n [0.19735483 0.34837823 0.69015595]\n [0.09448514 0.57390828 0.37097193]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "(*Tensor*): input tensor to copy",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(*Tensor*): copy of input tensor",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CopyFromCPUInput",
+ "description": "\nTake a CPU input tensor and copy it to an output in the current\nContext (GPU or CPU). This may involves cross-device MemCpy.\n",
+ "inputs": [
+ {
+ "description": "The input CPU tensor.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "either a TensorCUDA or a TensorCPU",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CopyOnDeviceLike",
+ "description": "Copy input tensor into output to the specific device.",
+ "inputs": [
+ {
+ "description": "The input tensor.",
+ "name": "input"
+ },
+ {
+ "description": "Tensor, on which device the copy will be performed.",
+ "name": "dst"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor that will contain a copy of the input.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CopyRowsToTensor",
+ "description": "\n This operator takes in a 2d tensor, a list of indices, and a 1d tensor\n with the same width of the 2d tensor. It will replace the rows in 2d\n tensor specified in indices with the 2d tensor. The operator does an\n in-place change to the input tensor.\n Example:\n INPUT_TENSOR = [[1, 2], [3, 4], [5, 6]]\n INDICES = [1]\n ROW = [9, 0]\n OUTPUT_TENSOR = [[1, 2], [9, 0], [5, 6]]\n ",
+ "inputs": [
+ {
+ "description": "Input tensor needs to be modified.",
+ "name": "input_tensor"
+ },
+ {
+ "description": "Indices of rows need to be copied",
+ "name": "indices"
+ },
+ {
+ "description": "1-d tensor that is going to replace the rows",
+ "name": "row"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "updated tensor",
+ "name": "output_tensor"
+ }
+ ],
+ "support_level": "default"
+ },
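The CopyRowsToTensor semantics can be mimicked in NumPy with fancy-indexed assignment; a minimal sketch (editorial, not the operator's implementation) reproducing the example above:

```
import numpy as np

input_tensor = np.array([[1, 2], [3, 4], [5, 6]])
indices = np.array([1])
row = np.array([9, 0])

# Replace each listed row with `row`, in place, as the operator does
input_tensor[indices] = row
print(input_tensor)  # -> [[1 2] [9 0] [5 6]]
```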
+ {
+ "name": "CopyRowsToTensorGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Cos",
+ "description": "\nCalculates the cosine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cos_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cos\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.6816719 0.76771533 0.933932 0.01404487 0.11862425]\nY: [0.7765203 0.71949923 0.5946774 0.99990135 0.9929724 ]\n\n```\n\n \n\n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor calculated as the cosine of the input tensor, element-wise.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CosGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Cosh",
+ "description": "\nCalculates the hyperbolic cosine of the given input tensor, element-wise.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/cosh_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Cosh\",\n [\"X\"],\n [\"Y\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(5).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX: [0.66423494 0.32074615 0.81523746 0.90423071 0.39275789]\nY: [1.22883528 1.05188156 1.35112322 1.43744212 1.07812598]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "Input tensor",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The hyperbolic cosine values of the input tensor, computed element-wise",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CoshGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "CosineEmbeddingCriterion",
+ "description": "\nCosineEmbeddingCriterion takes two inputs: the similarity value and\nthe label, and computes the elementwise criterion output as\n\n output = 1 - s, if y == 1\n max(0, s - margin), if y == -1\n",
+ "inputs": [
+ {
+ "description": "The cosine similarity as a 1-dim TensorCPU.",
+ "name": "S"
+ },
+ {
+ "description": "The label as a 1-dim TensorCPU with int value of 1 or -1.",
+ "name": "Y"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The output loss with the same dimensionality as S.",
+ "name": "loss"
+ }
+ ],
+ "support_level": "default"
+ },
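The piecewise definition in the CosineEmbeddingCriterion description translates directly into a vectorized NumPy sketch (editorial illustration; the function name and sample values are ours):

```
import numpy as np

def cosine_embedding_criterion(s, y, margin=0.0):
    # output = 1 - s               if y == 1
    #          max(0, s - margin)  if y == -1
    return np.where(y == 1, 1.0 - s, np.maximum(0.0, s - margin))

s = np.array([0.9, -0.2, 0.5], dtype=np.float32)  # cosine similarities
y = np.array([1, -1, -1])                          # labels in {1, -1}
print(cosine_embedding_criterion(s, y, margin=0.1))  # -> approx. [0.1 0. 0.4]
```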
+ {
+ "name": "CosineEmbeddingCriterionGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "CosineSimilarity",
+ "description": "\nThis op takes two input float tensors of the same size, $X$ and $Y$, and produces one output float tensor , $Z$, calculated as the cosine similarity between $X$ and $Y$. Recall, the cosine similarity between two tensors $X$ and $Y$ is defined as:\n\n$$\\mathbf{Z}=CosineSimilarity(\\mathbf{X},\\mathbf{Y}) = \\frac{\\mathbf{X}\\cdot\\mathbf{Y}}{\\|\\mathbf{X}\\|\\|\\mathbf{Y}\\|} = \\frac{\\sum_n^{i=1}X_iY_i}{\\sqrt{\\sum_n^{i=1}X_i^2}\\sqrt{\\sum_n^{i=1}Y_i^2}}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"CosineSimilarity\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\n# Create X\nX = np.random.randn(3, 3)\nprint(\"X:\\n\",X)\n\n# Create Y\nY = np.random.randn(3, 3)\nprint(\"Y:\\n\",Y)\n\n# Feed X & Y into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"Y\", Y.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-0.42635564 -0.23831588 -0.25515547]\n [ 1.43914719 -1.05613228 1.01717373]\n [ 0.06883105 0.33386519 -1.46648334]]\nY:\n [[-0.90648691 -0.14241514 -1.1070837 ]\n [ 0.92152729 -0.28115511 -0.17756722]\n [-0.88394254 1.34654037 -0.80080998]]\nZ:\n [-1.7849885e-23 1.7849885e-23 -1.0842022e-07]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "1D or 2D input tensor",
+ "name": "X"
+ },
+ {
+ "description": "1D or 2D input tensor (must have the same shape as X)",
+ "name": "Y"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D output tensor",
+ "name": "Z"
+ }
+ ],
+ "support_level": "default"
+ },
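For 2D inputs the formula above is applied row by row; a NumPy sketch of that computation (an editorial illustration under the stated definition, not the operator's code):

```
import numpy as np

def cosine_similarity(X, Y):
    # Z_i = (X_i . Y_i) / (||X_i|| * ||Y_i||), computed row-wise
    X, Y = np.atleast_2d(X), np.atleast_2d(Y)
    num = (X * Y).sum(axis=1)
    den = np.linalg.norm(X, axis=1) * np.linalg.norm(Y, axis=1)
    return num / den

X = np.random.randn(3, 3).astype(np.float32)
Y = np.random.randn(3, 3).astype(np.float32)
print(cosine_similarity(X, Y))  # three values in [-1, 1], one per row
```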
+ {
+ "name": "CosineSimilarityGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "CountDown",
+ "description": "\nIf the internal count value > 0, decreases count value by 1 and outputs False,\notherwise outputs True.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.",
+ "name": "counter"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: bool)* False unless the internal count is zero.",
+ "name": "done"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CountUp",
+ "description": "\nIncreases count value by 1 and outputs the previous value atomically.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* A blob pointing to an instance of a counter.",
+ "name": "counter"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: int)* Count value BEFORE this operation.",
+ "name": "previous_count"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateAtomicBool",
+ "description": "Create an unique_ptr blob to hold an atomic",
+ "outputs": [
+ {
+ "description": "Blob containing a unique_ptr>",
+ "name": "atomic_bool"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateBlobsQueue",
+ "support_level": "default"
+ },
+ {
+ "name": "CreateBlobsQueueDB",
+ "description": "Create a DBReader from a BlobsQueue",
+ "attributes": [
+ {
+ "description": "(default: -1 (no key)) index of blob for DB key in the BlobsQueue.",
+ "name": "key_blob_index",
+ "option": "optional"
+ },
+ {
+ "description": "(default: 0) index of blob for DB value in the BlobsQueue.",
+ "name": "value_blob_index",
+ "option": "optional"
+ },
+ {
+ "description": "(default: 0.0 (no timeout)) Timeout in seconds for reading from the BlobsQueue.",
+ "name": "timeout_secs",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The shared pointer to a queue containing Blobs.",
+ "name": "queue"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The DBReader for the given BlobsQueue",
+ "name": "reader"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateCommonWorld",
+ "description": "\nCreates a common world for communication operators.\n",
+ "attributes": [
+ {
+ "description": "(int) size of the common world.",
+ "name": "size",
+ "option": "optional"
+ },
+ {
+ "description": "(int) rank of this node in the common world.",
+ "name": "rank",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Key/value handler for rendezvous (optional).",
+ "name": "kv_handler"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "A common world for collective operations.",
+ "name": "comm_world"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateCounter",
+ "description": "\nCreates a count-down counter with initial value specified by the `init_count`\nargument.\n\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/counter_ops.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\ncreatecounter_op = core.CreateOperator(\n \"CreateCounter\",\n [],\n [\"counter\"],\n init_count=5\n)\n\nretrievecount_op = core.CreateOperator(\n \"RetrieveCount\",\n [\"counter\"],\n [\"count\"]\n)\n\ncheckcounterdone_op = core.CreateOperator(\n \"CheckCounterDone\",\n [\"counter\"],\n [\"done\"]\n)\n\ncountup_op = core.CreateOperator(\n \"CountUp\",\n [\"counter\"],\n [\"previous_count\"],\n)\n\ncountdown_op = core.CreateOperator(\n \"CountDown\",\n [\"counter\"],\n [\"done\"],\n)\n\nresetcounter_op = core.CreateOperator(\n \"ResetCounter\",\n [\"counter\"],\n [\"previous_count\"],\n init_count=3\n)\n\n\n# Create counter\nworkspace.RunOperatorOnce(createcounter_op)\nprint(\"'counter' pointer:\", workspace.FetchBlob(\"counter\"))\n\n\n# Retrieve initial counter value\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"Initial 'count':\", workspace.FetchBlob(\"count\"))\n\n\n# Check if counter is done\nworkspace.RunOperatorOnce(checkcounterdone_op)\nprint(\"Initial 'done' value:\", workspace.FetchBlob(\"done\"))\n\n\n# Test CountUp operator\nprint(\"\\nTesting CountUp operator...\")\nfor i in range(5):\n workspace.RunOperatorOnce(countup_op)\n print(\"'previous_count' after CountUp:\", workspace.FetchBlob(\"previous_count\"))\n\nworkspace.RunOperatorOnce(retrievecount_op)\nprint(\"'count' value after CountUp test:\", workspace.FetchBlob(\"count\"))\n\n\n# Test CountDown operator\nprint(\"\\nTesting CountDown operator...\")\nfor i in range(11):\n workspace.RunOperatorOnce(countdown_op)\n workspace.RunOperatorOnce(retrievecount_op)\n print(\"'count' value after CountDown: {}\\t'done' value: {}\".format(workspace.FetchBlob(\"count\"), workspace.FetchBlob(\"done\")))\n```\n\n**Result**\n\n```\n'counter' pointer: counter, a C++ native class of type std::__1::unique_ptr, std::__1::default_delete > >.\nInitial 'count': 5\nInitial 'done' value: False\n\nTesting CountUp operator...\n'previous_count' after CountUp: 5\n'previous_count' after CountUp: 6\n'previous_count' after CountUp: 7\n'previous_count' after CountUp: 8\n'previous_count' after CountUp: 9\n'count' value after CountUp test: 10\n\nTesting CountDown operator...\n'count' value after CountDown: 9 'done' value: False\n'count' value after CountDown: 8 'done' value: False\n'count' value after CountDown: 7 'done' value: False\n'count' value after CountDown: 6 'done' value: False\n'count' value after CountDown: 5 'done' value: False\n'count' value after CountDown: 4 'done' value: False\n'count' value after CountDown: 3 'done' value: False\n'count' value after CountDown: 2 'done' value: False\n'count' value after CountDown: 1 'done' value: False\n'count' value after CountDown: 0 'done' value: False\n'count' value after CountDown: -1 'done' value: True\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Initial count for the counter, must be >= 0.",
+ "name": "init_count",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* A blob pointing to an instance of a new counter.",
+ "name": "counter"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateDB",
+ "support_level": "default"
+ },
+ {
+ "name": "CreateMap",
+ "description": "Create an empty map blob",
+ "attributes": [
+ {
+ "description": "Key's TensorProto::DataType (default INT32)",
+ "name": "key_dtype",
+ "option": "optional"
+ },
+ {
+ "description": "Value's TensorProto::DataType (default INT32)",
+ "name": "value_dtype",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Blob reference to the map",
+ "name": "map blob"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateMutex",
+ "description": "Creates an unlocked mutex and returns it in a unique_ptr blob.",
+ "outputs": [
+ {
+ "description": "Blob containing a std::unique_ptr.",
+ "name": "mutex_ptr"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateRebatchingQueue",
+ "description": "\nCreates the Queue.\n",
+ "attributes": [
+ {
+ "description": "Number of input tensors the queue will support",
+ "name": "num_blobs",
+ "option": "optional"
+ },
+ {
+ "description": "Maximal number of elements the queue can hold at any given point",
+ "name": "capacity",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "object representing the queue",
+ "name": "queue"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateScope",
+ "description": "\n'CreateScope' operator initializes and outputs empty scope that is used\nby Do operator to store local blobs\n ",
+ "support_level": "default"
+ },
+ {
+ "name": "CreateTensorVector",
+ "description": "Create a std::unique_ptr >",
+ "support_level": "default"
+ },
+ {
+ "name": "CreateTextFileReader",
+ "description": "Create a text file reader. Fields are delimited by .",
+ "attributes": [
+ {
+ "description": "Path to the file.",
+ "name": "filename",
+ "option": "optional"
+ },
+ {
+ "description": "Number of passes over the file.",
+ "name": "num_passes",
+ "option": "optional"
+ },
+ {
+ "description": "List with type of each field. Type enum is found at core.DataType.",
+ "name": "field_types",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Pointer to the created TextFileReaderInstance.",
+ "name": "handler"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CreateTreeCursor",
+ "description": "\nCreates a cursor to iterate through a list of tensors, where some of those\ntensors contain the lengths in a nested schema. The schema is determined by\nthe `fields` arguments.\n\nFor example, to represent the following schema:\n\n Struct(\n a=Int(),\n b=List(List(Int)),\n c=List(\n Struct(\n c1=String,\n c2=List(Int),\n ),\n ),\n )\n\nthe field list will be:\n [\n \"a\",\n \"b:lengths\",\n \"b:values:lengths\",\n \"b:values:values\",\n \"c:lengths\",\n \"c:c1\",\n \"c:c2:lengths\",\n \"c:c2:values\",\n ]\n\nAnd for the following instance of the struct:\n\n Struct(\n a=3,\n b=[[4, 5], [6, 7, 8], [], [9]],\n c=[\n Struct(c1='alex', c2=[10, 11]),\n Struct(c1='bob', c2=[12]),\n ],\n )\n\nThe values of the fields will be:\n {\n \"a\": [3],\n \"b:lengths\": [4],\n \"b:values:lengths\": [2, 3, 0, 1],\n \"b:values:values\": [4, 5, 6, 7, 8, 9],\n \"c:lengths\": [2],\n \"c:c1\": [\"alex\", \"bob\"],\n \"c:c2:lengths\": [2, 1],\n \"c:c2:values\", [10, 11, 12],\n }\n\nIn general, every field name in the format \"{prefix}:lengths\" defines a domain\n\"{prefix}\", and every subsequent field in the format \"{prefix}:{field}\" will\nbe in that domain, and the length of the domain is provided for each entry of\nthe parent domain. In the example, \"b:lengths\" defines a domain of length 4, so\nevery field under domain \"b\" will have 4 entries.\nThe \"lengths\" field for a given domain must appear before any reference to\nthat domain.\n\nReturns a pointer to an instance of the Cursor, which keeps the current offset\non each of the domains defined by `fields`. Cursor also ensures thread-safety\nsuch that ReadNextBatch and ResetCursor can be used safely in parallel.\n\nA cursor does not contain data per se, so calls to ReadNextBatch actually need\nto pass a list of blobs containing the data to read for each one of the fields.\n",
+ "attributes": [
+ {
+ "description": "A list of strings each one representing a field of the dataset.",
+ "name": "fields",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "A blob pointing to an instance of a new TreeCursor.",
+ "name": "cursor"
+ }
+ ],
+ "support_level": "default"
+ },
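The lengths/values flattening described above can be reproduced in a few lines of plain Python; this editorial sketch derives the three "b" fields for the example instance (variable names are ours):

```
# For b = [[4, 5], [6, 7, 8], [], [9]], the nested List(List(Int)) flattens to:
b = [[4, 5], [6, 7, 8], [], [9]]

b_lengths = [len(b)]                          # "b:lengths"        -> [4]
b_values_lengths = [len(x) for x in b]        # "b:values:lengths" -> [2, 3, 0, 1]
b_values_values = [v for x in b for v in x]   # "b:values:values"  -> [4, 5, 6, 7, 8, 9]

print(b_lengths, b_values_lengths, b_values_values)
```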
+ {
+ "name": "CrossEntropy",
+ "description": "\nThis operator computes the cross entropy between a $NxD$ dimensional input data tensor $X$ and a $NxD$ dimensional input label tensor $label$. The op produces a single length $N$ output tensor $Y$. Here, $N$ is considered the batch size and $D$ is the size of each element in the batch. In practice, it is most commonly used at the end of models as a part of the loss computation, after the SoftMax operator and before the AveragedLoss operator. The cross entropy operation is defined as follows\n\n$$Y_i = \\sum_j (label_{ij} * log(X_{ij}))$$\n\nwhere ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"CrossEntropy\",\n [\"X\", \"label\"],\n [\"Y\"]\n)\n\n# Create X: Sample softmax output for 5-class model\nX = np.array([[.01, .05, .02, .02, .9],[.03, .1, .42, .05, .4]])\nprint(\"X:\\n\",X)\n\n# Create label: Sample 1-hot ground truth label vectors\nlabel = np.array([[0.,0.,0.,0.,1.],[0.,0.,1.,0.,0.]])\nprint(\"label:\\n\",label)\n\n# Feed X & label into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"label\", label.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[0.01 0.05 0.02 0.02 0.9 ]\n [0.03 0.1 0.42 0.05 0.4 ]]\nlabel:\n [[0. 0. 0. 0. 1.]\n [0. 0. 1. 0. 0.]]\nY:\n [0.10536055 0.8675006 ]\n\n```\n\n \n\n\n",
+ "inputs": [
+ {
+ "description": "Input tensor which is almost always the result of a softmax operation. $X$ is a 2D array of size $NxD$, where $N$ is the batch size and $D$ is the number of classes.",
+ "name": "X"
+ },
+ {
+ "description": "Blob containing the labels used to compare the input. $label$ is the same shape as $X$.",
+ "name": "label"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output blob from the cross entropy computation. $Y$ is 1D length $N$ tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
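The formula with the leading minus sign can be verified against the example's output; a NumPy sketch (editorial check, not the operator's implementation) reproduces Y up to float32 rounding:

```
import numpy as np

X = np.array([[.01, .05, .02, .02, .9], [.03, .1, .42, .05, .4]])
label = np.array([[0., 0., 0., 0., 1.], [0., 0., 1., 0., 0.]])

# Y_i = -sum_j label_ij * log(X_ij)
Y = -(label * np.log(X)).sum(axis=1)
print(Y)  # -> [0.10536052 0.86750057], matching the example output above
```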
+ {
+ "name": "CrossEntropyGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "CTCBeamSearchDecoder",
+ "description": "Prefix beam search decoder for connectionist temporal classification.",
+ "attributes": [
+ {
+ "description": "Maximum number of candidates to carry over to next activation step.",
+ "name": "beam_width",
+ "option": "optional"
+ },
+ {
+ "description": "Probability threshold below which outputs are ignored.",
+ "name": "prune_threshold",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "3D float Tensor sized [max_activation_length, batch_size, alphabet_size] of network logits (before softmax application).",
+ "name": "INPUTS"
+ },
+ {
+ "description": "(optional) 1D int vector containing sequence lengths, having size [batch_size] seq_len will be set to max_time if not provided.",
+ "name": "SEQ_LEN"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output_len matrix size (batch_size * num_candidates). Each index stores lengths of candidates for its corresponding batch item.",
+ "name": "OUTPUT_LEN"
+ },
+ {
+ "description": "Values vector, size (total_decoded_outputs). The flattened vector of final output sequences, in batch order.",
+ "name": "VALUES"
+ },
+ {
+ "description": "Probability vector, size (total_decoded_outputs). Each index stores final output probability of its corresponding batch item.",
+ "name": "OUTPUT_PROB"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CTCGreedyDecoder",
+ "description": "Greedy decoder for connectionist temporal classification.",
+ "attributes": [
+ {
+ "description": "When merge_repeated is true, merge repeated classes in output.",
+ "name": "merge_repeated",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "3D float Tensor sized [max_time, batch_size, num_classes]",
+ "name": "INPUTS"
+ },
+ {
+ "description": "(optional) 1D int vector containing sequence lengths, having size [batch_size]seq_len will be set to max_time if not provided",
+ "name": "SEQ_LEN"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output_len matrix size (batch). The row store: [decoded_length]",
+ "name": "OUTPUT_LEN"
+ },
+ {
+ "description": "Values vector, size (total_decoded_outputs). The vector stores the decoded classes",
+ "name": "VALUES"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Cube",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor calculated as the cube of the input tensor, element-wise.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "CubeGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "DataCouple",
+ "description": "\n\nA one to one operator that takes an arbitrary number of input and output blobs\nsuch that each input blob is inplace with it's matching output blob. It then proceedes\nto do nothing with each of these operators. This serves two purposes. It can make it\nappear as if a blob has been written to, as well as can tie together different blobs\nin a data dependency\n\n",
+ "support_level": "default"
+ },
+ {
+ "name": "DBExists",
+ "description": "\nChecks if the db described by the arguments exists.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/load_save_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"DBExists\",\n [],\n [\"exists\"],\n db_name=\"test_db\",\n db_type=\"leveldb\",\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"exists:\", workspace.FetchBlob(\"exists\"))\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "If set to non-zero, save the db directly to the path specified by the `db` arg. If not set (default), prepend the path of the current root folder of the workspace to the path specified by the `db` arg.",
+ "name": "absolute_path",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "description": "Path to the db in question; see the `absolute_path` arg details for options regarding the current root folder of the workspace.",
+ "name": "db_name",
+ "option": "optional",
+ "type": "string"
+ },
+ {
+ "description": "Type of db to save (options: \"lmdb\", \"leveldb\", \"minidb\").",
+ "name": "db_type",
+ "option": "optional",
+ "type": "string"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Scalar boolean output tensor. True if the db exists, else false.",
+ "name": "exists"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DenseVectorToIdList",
+ "description": "\nDenseVectorToIdList: Convert a blob with dense feature into a ID_LIST.\n\nAn ID_LIST is a list of IDs (may be ints, often longs) that represents a single\nfeature. As described in https://caffe2.ai/docs/sparse-operations.html, a batch\nof ID_LIST examples is represented as a pair of lengths and values where the\n`lengths` (int32) segment the `values` or ids (int32/int64) into examples.\n\nInput is a single blob where the first dimension is the batch size and the\nsecond dimension is the length of dense vectors. This operator produces a\nID_LIST where out_values are the indices of non-zero entries\nand out_lengths are the number of non-zeros entries in each row.\n\n",
+ "inputs": [
+ {
+ "description": "A data blob of dense vectors",
+ "name": "values"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Lengths of the sparse feature",
+ "name": "out_lengths"
+ },
+ {
+ "description": "Values of the sparse feature",
+ "name": "out_values"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DepthConcat",
+ "description": "Backward compatible operator name for Concat.",
+ "support_level": "default"
+ },
+ {
+ "name": "DepthSplit",
+ "description": "Backward compatible operator name for Split.",
+ "support_level": "default"
+ },
+ {
+ "name": "DequeueBlobs",
+ "description": "\n Dequeue the blobs from queue.\n ",
+ "attributes": [
+ {
+ "description": "Timeout in secs, default: no timeout",
+ "name": "timeout_secs",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The shared pointer for the BlobsQueue",
+ "name": "queue"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The blob to store the dequeued data",
+ "name": "blob"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DequeueRebatchingQueue",
+ "description": "\nDequeue Tensors from the Queue.\nIf the Queue is closed this might return less elements than asked.\nIf num_elements > 1 the returned elements will be concatenated into one\ntensor per component.\n",
+ "attributes": [
+ {
+ "description": "Number of elements to dequeue. By default we dequeue one element.",
+ "name": "num_elements",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "object representing the queue",
+ "name": "rebatching_queue"
+ },
+ {
+ "description": "First tensor to enqueue",
+ "name": "tensor"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DestroyCommonWorld",
+ "description": "Closes all connections managed by a common world.",
+ "inputs": [
+ {
+ "description": "The common world to be destroyed.",
+ "name": "common_world"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DiagonalFill",
+ "description": "\nThe operator fills the diagonal elements of the output tensor (>= 2D)\nwith a constant value specified by the 'value' argument, and others 0. If\nnumber of dimensions of the output tensor is greater than 2, all dimensions\nmust be equal.\n\nThe data type is specified by the 'dtype' argument. The 'dtype' argument must\nbe one of the data types specified in the 'DataType' enum field in the\nTensorProto message. If the 'dtype' argument is not provided, the data type of\n'value' is used.\n\nThe output tensor shape is specified by the 'shape' argument. If the number of\ninput is 1, the shape will be identical to that of the input at run time with\noptional additional dimensions appended at the end as specified by 'extra_shape'\nargument. In that case the 'shape' argument should not be set.\n\nIf input_as_shape is set to true, then the input should be a 1D tensor\ncontaining the desired output shape (the dimensions specified in extra_shape\nwill also be appended)\n\nNOTE: Currently, it supports data type of float, int32, int64, and bool.\n",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "value",
+ "option": "optional"
+ },
+ {
+ "description": "The data type for the elements of the output tensor.Strictly must be one of the types from DataType enum in TensorProto.",
+ "name": "dtype",
+ "option": "optional"
+ },
+ {
+ "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor (optional) to provide shape information.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output tensorargument and its type is specified by the 'dtype' argument",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DistributeFpnProposals",
+ "description": "\n...\n",
+ "attributes": [
+ {
+ "description": "(int) ROI_CANONICAL_SCALE",
+ "name": "roi_canonical_scale",
+ "option": "optional"
+ },
+ {
+ "description": "(int) ROI_CANONICAL_LEVEL",
+ "name": "roi_canonical_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) ROI_MAX_LEVEL",
+ "name": "roi_max_level",
+ "option": "optional"
+ },
+ {
+ "description": "(int) ROI_MIN_LEVEL",
+ "name": "roi_min_level",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Top proposals limited to rpn_post_nms_topN total, format (image_index, x1, y1, x2, y2)",
+ "name": "rois"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "RPN proposals for ROI level 2, format (image_index, x1, y1, x2, y2)",
+ "name": "rois_fpn2"
+ },
+ {
+ "description": "RPN proposals for ROI level 3, format (image_index, x1, y1, x2, y2)",
+ "name": "rois_fpn3"
+ },
+ {
+ "description": "RPN proposals for ROI level 4, format (image_index, x1, y1, x2, y2)",
+ "name": "rois_fpn4"
+ },
+ {
+ "description": "RPN proposals for ROI level 5, format (image_index, x1, y1, x2, y2)",
+ "name": "rois_fpn5"
+ },
+ {
+ "description": "Permutation on the concatenation of all rois_fpni, i=min...max, such that when applied the RPN RoIs are restored to their original order in the input blobs.",
+ "name": "rois_idx_restore"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Div",
+ "description": "\nPerforms element-wise binary division (with limited broadcast support).\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Div\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([[18,8],[2,9]]))\nworkspace.FeedBlob(\"B\", np.array([[9,2],[3,2]]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA:\n[[18 8]\n [ 2 9]]\nB:\n[[9 2]\n [3 2]]\nC:\n[[2 4]\n [0 4]]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than A. If broadcasting is disabled it should be of the same size as A.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with same dimensions and type as A.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
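The broadcast shapes listed in the Div description can be illustrated with NumPy; this editorial sketch emulates the axis=1 case by reshaping B (NumPy aligns trailing dimensions, so the alignment the axis argument expresses is made explicit here; it is not the operator's implementation):

```
import numpy as np

A = np.ones((2, 3, 4, 5))
B = np.arange(1, 13, dtype=np.float64).reshape(3, 4)  # shape (3, 4), axis=1 case

# With broadcast=1 and axis=1, B aligns with dims 1..2 of A; in NumPy the
# same alignment needs explicit singleton dims before and after B's shape.
C = A / B.reshape(1, 3, 4, 1)
print(C.shape)  # -> (2, 3, 4, 5), same dimensions as A
```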
+ {
+ "name": "DivGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Do",
+ "description": "\n'Do' control operator, executes a subnet in a separate workspace.\nLast blobs in the input and output lists should be the same blob created with\nCreateScope op. Arguments 'inner_blobs' and 'outer_blobs_idx' provide a mapping\nbetween selected inner blob names and corresponding outer blob indices.\n ",
+ "attributes": [
+ {
+ "description": "Subnet with blob bindings",
+ "name": "net",
+ "option": "optional"
+ },
+ {
+ "description": "List of inner net blob names to bind to outer workspace",
+ "name": "inner_blobs",
+ "option": "optional"
+ },
+ {
+ "description": "Indices of corresponding outer workspace blobs, in order: operator inputs, operator outputs (skipping workspace blobs)",
+ "name": "outer_blobs_idx",
+ "option": "optional"
+ },
+ {
+ "description": "List of blobs from the forward Do operator workspace needed in backward pass, used in gradient Do operator",
+ "name": "saved_fwd_blobs",
+ "option": "optional"
+ },
+ {
+ "description": "Whether to reuse workspace or create a new one in a given scope",
+ "name": "reuse_workspace",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DotProduct",
+ "description": "\nComputes and outputs the dot product of the two input float tensors `X` and `Y`.\nNote that `X` and `Y` must be either 1D or 2D, and they must be the same shape.\nThe output tensor is 1D, which represents either the product of each element in\na respective dimension if the inputs are 1D, or the sum of the products in a\ngiven dimension if the inputs are 2D matrices. Note that the actual dot product\nis a scalar value, which is effectively the sum of the elements in the 1D\noutput tensor.\n\nFor 1D inputs:\nGiven two vectors $X = [x_0, x_1, x_2]$ and $Y = [y_0, y_1, y_2]$; $Z = [x_0 * y_0, x_1 * y_1, x_2 * y_2]$\n\nFor 2D inputs:\nGiven two matrices:\n$$X = [[x_0^0, x_1^0, x_2^0], \\\\ [x_0^1, x_1^1, x_2^1], \\\\ [x_0^2, x_1^2, x_2^2], \\\\ ..., \\\\ [x_0^n, x_1^n, x_2^n]]$$\n\nand\n\n$$Y = [[y_0^0, y_1^0, y_2^0], \\\\ [y_0^1, y_1^1, y_2^1], \\\\ [y_0^2, y_1^2, y_2^2], \\\\ ..., \\\\ [y_0^n, y_1^n, y_2^n]]$$\n\nthen\n\n$$Z = \\biggl[\\Big((x_0^0 * y_0^0) + (x_1^0 * y_1^0) + (x_2^0 * y_2^0)\\Big), \\\\ \\Big((x_0^1 * y_0^1) + (x_1^1 * y_1^1) + (x_2^1 * y_2^1)\\Big), \\\\ \\Big((x_0^2 * y_0^2) + (x_1^2 * y_1^2) + (x_2^2 * y_2^2)\\Big), \\\\ ..., \\\\ \\Big((x_0^n * y_0^n) + (x_1^n * y_1^n) + (x_2^n * y_2^n)\\Big)\\biggr]$$\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"DotProduct\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(20, size=(5)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", np.random.randint(20, size=(5)).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n\nworkspace.ResetWorkspace()\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", np.random.randint(10, size=(3,3)).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [ 2. 15. 2. 7. 12.]\nY:\n [ 3. 12. 9. 3. 18.]\nZ:\n [ 6. 180. 18. 21. 216.]\nX:\n [[2. 0. 4.]\n [7. 7. 4.]\n [7. 9. 9.]]\nY:\n [[2. 0. 8.]\n [9. 6. 1.]\n [7. 8. 0.]]\nZ:\n [ 36. 109. 121.]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* 1D or 2D input tensor.",
+ "name": "X"
+ },
+ {
+ "description": "*(type: Tensor``)* 1D or 2D input tensor (must have the same shape as X).",
+ "name": "Y"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* 1D output tensor.",
+ "name": "Z"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DotProductGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "DotProductWithPadding",
+ "description": "\nGiven two input float tensors X, Y with different shapes and produces one\noutput float tensor of the dot product between X and Y. We currently support\ntwo kinds of strategies to achieve this. Before doing normal dot_product 1)\npad the smaller tensor (using pad_value) to the same shape as the other one.\n2) replicate the smaller tensor to the same shape as the other one. Note the\nfirst dimension of X, Y must be equal. Only the second dimension of X or Y\ncan be padded.\n",
+ "attributes": [
+ {
+ "description": "the padding value for tensors with smaller dimension",
+ "name": "pad_value",
+ "option": "optional"
+ },
+ {
+ "description": "whether to replicate the smaller tensor or not",
+ "name": "replicate",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1D or 2D input tensor",
+ "name": "X"
+ },
+ {
+ "description": "1D or 2D input tensor",
+ "name": "Y"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D output tensor",
+ "name": "Z"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DotProductWithPaddingGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Dropout",
+ "category": "Dropout",
+ "description": "\n\n`Dropout` takes one input data tensor (`X`) and produces two tensor outputs, `Y` and\n`mask`. If the `is_test` argument is zero (default=0), the output `Y` will be the input\nwith random elements zeroed. The probability that a given element is zeroed is\ndetermined by the `ratio` argument.\n\nIf the `is_test` argument is set to non-zero, the output `Y` is exactly the same as the\ninput `X`. Note that outputs are scaled by a factor of $\\frac{1}{1-ratio}$ during\ntraining, so that during test time, we can simply compute an identity function. This\nscaling is important because we want the output at test time to equal the expected value\nat training time. Dropout has been proven to be an effective regularization technique to\nprevent overfitting during training.\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dropout_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/dropout_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Dropout\",\n [\"X\"],\n [\"Y\"] + [\"mask\"],\n ratio=0.5,\n is_test=0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randint(10, size=(5, 5)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"mask:\", workspace.FetchBlob(\"mask\"))\n```\n\n**Result**\n\n```\nX: [[5. 4. 3. 6. 9.]\n [2. 1. 8. 0. 9.]\n [7. 3. 0. 6. 3.]\n [1. 8. 2. 6. 4.]\n [6. 2. 6. 4. 0.]]\nY: [[ 0. 0. 0. 12. 18.]\n [ 0. 0. 16. 0. 0.]\n [ 0. 0. 0. 12. 6.]\n [ 0. 0. 4. 0. 0.]\n [12. 0. 0. 0. 0.]]\nmask: [[False False False True True]\n [False False True True False]\n [False False True True True]\n [False False True False False]\n [ True False False False False]]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0.5,
+ "description": "Probability of an element to be zeroed.",
+ "name": "ratio",
+ "option": "optional",
+ "type": "float32"
+ },
+ {
+ "default": 0,
+ "description": "If zero (train mode), perform dropout. If non-zero(test mode), Y = X.",
+ "name": "is_test",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor.",
+ "name": "Y"
+ },
+ {
+ "description": "*(type: Tensor``)* The output mask containing boolean values for each element, signifying which elements are dropped out. If `is_test` is nonzero, this output is not filled.",
+ "name": "mask"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "DropoutGrad",
+ "support_level": "default"
+ },
+ {
+ "name": "ElementwiseLinear",
+ "description": "\nThis op computes the elementwise linear combination of a batch of input vectors with a weight vector and bias vector. As input, the op takes an input tensor $X$ of shape $NxD$, a weight vector $w$ of length $D$, and a bias vector $b$ of length $D$. Here, $N$ represents the batch size and $D$ represents the length of the feature vectors. The output, $Y$, is a tensor of shape $NxD$ and is calculated as\n\n$$Y_{ij} = X_{ij}w_j + b_j \\ for \\ i\\in{N}, j\\in{D}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_linear_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_linear_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ElementwiseLinear\",\n [\"X\", \"w\", \"b\"],\n [\"Y\"]\n)\n\n# Create X\nX = np.array([[1,2,3,4,5],[6,8,9,16,10]])\nprint(\"X:\\n\",X)\n\n# Create w\nw = np.array([1,1/2.,1/3.,1/4.,1/5.])\nprint(\"w:\\n\",w)\n\n# Create b\nb = np.array([1.,1.,1.,1.,1.])\nprint(\"b:\\n\",b)\n\n\n# Feed X & w & b into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"w\", w.astype(np.float32))\nworkspace.FeedBlob(\"b\", b.astype(np.float32))\n\n# Run op\nworkspace.RunOperatorOnce(op)\n\n# Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 1 2 3 4 5]\n [ 6 8 9 16 10]]\nw:\n [1. 0.5 0.33333333 0.25 0.2]\nb:\n [1. 1. 1. 1. 1.]\nY:\n [[2. 2. 2. 2. 2.]\n [7. 5. 4. 5. 3.]]\n\n```\n\n \n\n ",
+ "attributes": [
+ {
+ "default": 1,
+ "description": "Describes the axis of the inputs; defaults to one because the 0th axis most likely describes the batch size.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "2D input tensor of size $NxD$. This input represents the input data to be operated on.",
+ "name": "X"
+ },
+ {
+ "description": "1D scaling factors, or weights, of size $D$. This input contains the weights that will be multiplied by the data.",
+ "name": "w"
+ },
+ {
+ "description": "1D biases of size $D$. This input contains the biases that will be added to the products of the weights and data.",
+ "name": "b"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "2D output tensor of size $NxD$. Calculated as described above.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ElementwiseLinearGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Elu",
+ "description": "\n\nThis op implements the exponential linear unit (ELU) activation function as described in [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs)](https://arxiv.org/abs/1511.07289). The op takes an input tensor $X$ of arbitrary shape, computes the elementwise elu operation, and returns a vector $Y$ of the same shape as output. The alpha parameter may be passed as an argument, but defaults to 1. The elu operation is defined as\n\n$$y=f(x) =\\begin{cases}\\alpha(e^x-1) & x < 0 \\\\ x & otherwise\\end{cases}$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elu_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Elu\",\n [\"X\"],\n [\"Y\"],\n alpha=1.1\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[ 0.35339102 1.1860217 -0.10710736]\n [-3.1173866 -0.1889988 -0.20330353]\n [ 1.8525308 -0.368949 0.506277 ]]\n\nY:\n [[ 0.35339102 1.1860217 -0.11172786]\n [-1.0513 -0.18943374 -0.20236646]\n [ 1.8525308 -0.33939326 0.506277 ]]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 1.0,
+ "description": "Defines alpha parameter used in calculation.",
+ "name": "alpha",
+ "option": "optional",
+ "type": "float32"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1D input tensor of data to be operated on.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D input tensor, calculated as described above.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "EluGradient",
+ "description": "\nEluGradient takes both Y and dY and uses this to update dX according to the\nchain rule and derivatives of the rectified linear function.\n",
+ "support_level": "default"
+ },
+ {
+ "name": "EnforceFinite",
+ "description": "\nRaise if there is NaN or Inf values in the input tensor.\n",
+ "inputs": [
+ {
+ "description": "Input tensor",
+ "name": "input"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "EnqueueBlobs",
+ "support_level": "default"
+ },
+ {
+ "name": "EnqueueRebatchingQueue",
+ "description": "\nEnqueues Tensors into the queue.\nNumber of input tensors should be equal to the number of components passed\nduring creation of the queue.\nIf the Queue is closed this operation will fail.\nIf enqueue_batch argument is set. We will split the input tensors by the\nfirst dimension to produce single queue elements.\n",
+ "attributes": [
+ {
+ "description": "Are we enqueuing a batch or just a single element. By default we enqueue single element.",
+ "name": "enqueue_batch",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "object representing the queue",
+ "name": "queue"
+ },
+ {
+ "description": "First tensor to enque. ",
+ "name": "tensor"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "EnsureClipped",
+ "description": "\nGiven a tensor, apply clip after gradient is applied; when the param is sparse as\nindicated by valid indices and grad, in-place is required\n",
+ "inputs": [
+ {
+ "description": "Parameters to be normalized",
+ "name": "param"
+ },
+ {
+ "description": "Sparse indices, only needed for sparse param",
+ "name": "indices"
+ },
+ {
+ "description": "Gradient computed, only needed for sparse param",
+ "name": "grad"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "param ensured to be clipped within range",
+ "name": "output_param"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "EnsureCPUOutput",
+ "description": "\nThis Op always create TensorCPU output, and may involves cross-device MemCpy.\nUnder CPU Context, this Op takes TensorCPU as input. Under the CUDA Context,\nthis Op accepts either CUDA or CPU Tensor input.\n",
+ "inputs": [
+ {
+ "description": "The input CUDA or CPU tensor.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "TensorCPU that is a copy of the input.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "EnsureDense",
+ "description": "\nThis operator converts dense or sparse gradients to dense ones.\nTherefore, sparse gradient can be back propagated to Operators that consume\ndense gradients only (e.g., FCGradient).\n\nThe operator's behaviors:\n\n- In forward, simply pass in place or copy input to the output.\n- In backward, if the gradient passed-in is sparse gradient, change it to dense gradient in linear time; otherwise, simply pass the dense gradient.\n",
+ "inputs": [
+ {
+ "description": "Input tensors.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output tensor. Same dimension as inputs.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "EQ",
+ "description": "\nPerforms element-wise equal to comparison **==** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"EQ\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True False False True True False]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Erf",
+ "description": "\nCalculates the arcsine of the given input tensor, element-wise.\n",
+ "inputs": [
+ {
+ "description": "Input tensor",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The arcsine of the input tensor computed element-wise",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ErfGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Exp",
+ "description": "\nCalculates the exponential of the given input tensor ($exp(x)$), element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/exp_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Exp\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[0.5821691 0.07719802 0.50159824]\n [0.40952456 0.36788362 0.84887683]\n [0.02472685 0.65730894 0.9066397 ]]\nX after running op:\n[[1.7899168 1.080256 1.6513585]\n [1.5061016 1.4446739 2.3370204]\n [1.0250351 1.9295927 2.4759884]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* The exponential of the input tensor computed element-wise.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Expand",
+ "description": "\n Broadcast the input tensor to a materialized new tensor using given shape.\n Broadcast rule is similar to \"numpy.array(input) * numpy.ones(shape)\":\n Dimensions are right alignment;\n Two corresponding dimensions must have the same value, or one of them\n equals to 1.\n In order to align with PyTorch's `expand`, `shape` is allowed to have entries\n equal to -1, which means to preserve the size of the corresponding dimension\n in `X` (so it's actually equivalent to equal to 1).\n",
+ "inputs": [
+ {
+ "description": "(*Tensor``*): input tensor",
+ "name": "X"
+ },
+ {
+ "description": "(*Tensor``*): expand shape",
+ "name": "shape"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(*Tensor``*): expanded tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ExpandDims",
+ "description": "\nThe *ExpandDims* op inserts single-dimensional entries into the shape of the input tensor *data,* and produces a single output tensor *expanded*. The op also takes an argument *dims* with a list of dimensions for where to add the single dimensional entries. If the same blob is provided as input and output, the operation is copy-free. This is the exact inverse operation of *Squeeze*.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/expand_squeeze_dims_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"ExpandDims\",\n [\"data\"],\n [\"expanded\"],\n dims=[0,1],\n)\n\nworkspace.FeedBlob(\"data\", np.zeros((100,100)).astype(np.float32))\nprint(\"data.shape:\", workspace.FetchBlob(\"data\").shape)\n\nworkspace.RunOperatorOnce(op)\nprint(\"expanded.shape:\", workspace.FetchBlob(\"expanded\").shape)\n\n```\n\n**Result**\n\n```\n\ndata.shape: (100, 100)\nexpanded.shape: (1, 1, 100, 100)\n\n```\n\n \n\n\n\n",
+ "attributes": [
+ {
+ "description": "List of dimensions of *data* to add single dimensional entry.",
+ "name": "dims",
+ "option": "optional",
+ "type": "int64[]"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor of data to be operated on.",
+ "name": "data"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Reshaped tensor with same data as input.",
+ "name": "expanded"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "ExpandGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Fail",
+ "support_level": "default"
+ },
+ {
+ "name": "FbFCPacked",
+ "description": "Same as FC,\n but the weight is prepacked as a fbgemm::PackedGemmMatrixFP16",
+ "support_level": "default"
+ },
+ {
+ "name": "FbGemmPack",
+ "description": "Prepack weight for fbgemm",
+ "inputs": [
+ {
+ "description": "row major format weight matrix",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Block row major packed format weight matrix",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FbGemmPackTranspose",
+ "description": "Prepack weight for fbgemm",
+ "inputs": [
+ {
+ "description": "col major format weight matrix",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Block col major packed format weight matrix",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FC",
+ "category": "Layer",
+ "description": "\nThe FC operator computes an output $(Y)$ as a linear combination of the input data blob $(X)$ with a weight blob $(W)$ and bias blob $(b)$. More formally,\n\n$$Y = XW^T+b$$\n\nHere, $X$ is a matrix of shape $(M,K)$, $W$ is a matrix of shape $(N,K)$, $b$ is a vector of length $N$, and $Y$ is a matrix of shape $(M,N)$. $N$ can be thought of as the number of nodes in the layer, $M$ is the batch size, and $K$ is the number of features in an input observation.\n\n*NOTE: $X$ does not need to explicitly be a 2-dimensional matrix, however, if it is not it will be coerced into one. For an arbitrary $n$-dimensional tensor $X$, e.g. $[a_0, a_1, \\ldots ,a_{k-1}, a_k, \\ldots , a_{n-1}]$, where $a_i$ in $N$, and $k$ is the $axis$ arg provided, then $X$ will be coerced into a 2-dimensional tensor with dimensions $[a_0 * \\ldots * a_{k-1}, a_k * \\ldots * a_{n-1}]$. For the default case where axis=1, this means the $X$ tensor will be coerced into a 2D tensor of dimensions $[a_0, a_1 * \\ldots * a_{n-1}]$, where $a_0$ is often the batch size. In this situation, we must have $a_0 = M$ and $a_1 * \\ldots * a_{n-1} = K$. Lastly, even though $b$ is a vector of length $N$, it is copied and resized to shape $(M x N)$ implicitly, then added to each vector in the batch.*\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/fully_connected_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/fully_connected_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\n# In this example, our batch size is 1 (M=1), the input observation will have\n# 6 features (K=6), and the layer will have one hidden node (N=1). The\n# expected output is Y=7.\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FC\",\n [\"X\", \"W\", \"b\"],\n [\"Y\"]\n)\n\n# Create X: MxK\ndata = np.array([1,2,3,4,5,6]).astype(np.float32)\ndata = data[np.newaxis,:]\n\n# Create W: NxK\nweights = np.array(np.array([1,1/2.,1/3.,1/4.,1/5.,1/6.])).astype(np.float32)\nweights = weights[np.newaxis,:]\n\n# Create b: N\nbias = np.array([1.]).astype(np.float32)\n\n# Put the inputs into the workspace\nworkspace.FeedBlob(\"X\", data)\nworkspace.FeedBlob(\"W\", weights)\nworkspace.FeedBlob(\"b\", bias)\n\n# Run the operator\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nY:\n [[7.]]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 1,
+ "description": "Describes the axis of the input data $X$. Defaults to one because in the common case when the input $X$ has shape $(M,K)$, the first axis encodes the batch size.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": 1,
+ "description": "Describes the axis of the input weight matrix $W$. Defaults to one because the first axis most likely describes the batch_size.",
+ "name": "axis_w",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": false,
+ "description": "Whether to use float-16 compute kernel.",
+ "name": "float16_compute",
+ "option": "optional",
+ "type": "boolean"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input blob to be coerced into a 2D matrix of shape $(M,K)$, where $M$ is the batch size and $K$ is the number of features in a single observation.",
+ "name": "X"
+ },
+ {
+ "description": "Input blob to be coerced into a 2D matrix of shape $(N,K)$ describing a fully connected weight matrix. Here, $K$ is the number of features in a single observation and $N$ is the number of nodes in the FC layer.",
+ "name": "W"
+ },
+ {
+ "description": "Input blob containing vector of length $N$ which describes one bias for each node in the layer.",
+ "name": "b"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output blob containing a 2D output matrix of shape $(M,N)$, where $M$ is the batch size and $N$ is the number of nodes in the layer. The output is calculated as $Y=XW^T+b$.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FCGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "FCTransposed",
+ "description": "\nSame as FC, but weight matrix is supposed to be already pretransposed.\nFCTransposed stands for calling blass with no noTrans, noTrans\n",
+ "support_level": "default"
+ },
+ {
+ "name": "FCTransposedGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "FeedBlob",
+ "description": "\nFeedBlobs the content of the blobs. The input and output blobs should be\none-to-one inplace.",
+ "attributes": [
+ {
+ "description": "(string) if provided then we will use this string as the value for theprovided output tensor",
+ "name": "value",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FileStoreHandlerCreate",
+ "description": "\nCreates a unique_ptr that uses the filesystem as backing\nstore (typically a filesystem shared between many nodes, such as NFS).\nThis store handler is not built to be fast. Its recommended use is for\nintegration tests and prototypes where extra dependencies are\ncumbersome. Use an ephemeral path to ensure multiple processes or runs\ndon't interfere.\n",
+ "attributes": [
+ {
+ "description": "base path used by the FileStoreHandler",
+ "name": "path",
+ "option": "optional"
+ },
+ {
+ "description": "prefix for all keys used by this store",
+ "name": "prefix",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "unique_ptr",
+ "name": "handler"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Find",
+ "description": "\nFinds elements of second input from first input,\noutputting the last (max) index for each query.\nIf query not find, inserts missing_value.\nSee IndexGet() for a version that modifies the index when\nvalues are not found.\n",
+ "attributes": [
+ {
+ "description": "Placeholder for items that are not found",
+ "name": "missing_value",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Index (integers)",
+ "name": "index"
+ },
+ {
+ "description": "Needles / query",
+ "name": "query"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Indices of the needles in index or 'missing value'",
+ "name": "query_indices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FindDuplicateElements",
+ "description": "\nThe *FindDuplicateElements* op takes a single 1-D tensor *data* as input and returns a single 1-D output tensor *indices*. The output tensor contains the indices of the duplicate elements of the input, excluding the first occurrences. If all elements of *data* are unique, *indices* will be empty.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/find_duplicate_elements_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/find_duplicate_elements_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FindDuplicateElements\",\n [\"data\"],\n [\"indices\"],\n)\n\nworkspace.FeedBlob(\"data\", np.array([8,2,1,1,7,8,1]).astype(np.float32))\nprint(\"data:\\n\", workspace.FetchBlob(\"data\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"indices: \\n\", workspace.FetchBlob(\"indices\"))\n\n```\n\n**Result**\n\n```\n\ndata:\n [8. 2. 1. 1. 7. 8. 1.]\nindices:\n [3 5 6]\n\n```\n\n \n\n\n ",
+ "inputs": [
+ {
+ "description": "a 1-D tensor.",
+ "name": "data"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Indices of duplicate elements in data, excluding first occurrences.",
+ "name": "indices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Flatten",
+ "description": "\nFlattens the input tensor into a 2D matrix. If input tensor has shape\n$(d_0, d_1, ..., d_n)$ then the output will have shape\n$\\bigl((d_0 * d_1 * ... * d_{(axis-1)}), (d_{axis} * d_{(axis+1)} * ... * d_n)\\bigr)$.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/flatten_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Flatten\",\n [\"X\"],\n [\"Y\"],\n axis=1\n)\n\nworkspace.FeedBlob(\"X\", np.random.rand(1,3,2,2))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX: [[[[0.53432311 0.23734561]\n [0.56481598 0.52152617]]\n\n [[0.33662627 0.32472711]\n [0.17939016 0.97175851]]\n\n [[0.87226421 0.49045439]\n [0.92470531 0.30935077]]]]\nY: [[0.53432311 0.23734561 0.56481598 0.52152617 0.33662627 0.32472711\n 0.17939016 0.97175851 0.87226421 0.49045439 0.92470531 0.30935077]]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 1,
+ "description": "Indicates up to which input dimensions (exclusive) should be flattened to the outer dimension of the output.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor)* Input Tensor of rank >= axis.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor)* A 2D tensor with the contents of the input tensor, with input dimensions up to `axis` flattened to the outer dimension of the output and the remaining input dimensions flattened into the inner dimension of the output.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FlattenToVec",
+ "description": "\n\nThe *FlattenToVec* op flattens the input tensor into a 1-D vector. The op accepts a single input tensor and returns a single output tensor.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"FlattenToVec\",\n [\"input\"],\n [\"output\"],\n)\n\nworkspace.FeedBlob(\"input\", np.array([[1,2,3],[4,5,6],[7,8,9],[10,11,12]]).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output: \\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [[ 1. 2. 3.]\n [ 4. 5. 6.]\n [ 7. 8. 9.]\n [10. 11. 12.]]\noutput:\n [ 1. 2. 3. 4. 5. 6. 7. 8. 9. 10. 11. 12.]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "A tensor of rank >= 1.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "A tensor of rank 1 (vector) with the contents of the input tensor.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FlexibleTopK",
+ "description": "\nGiven two tensors: X and K,\nretrieve the top K[..., 1] elements from X on the last dimension.\nX is an input tensor of shape [a_1, a_2, ..., a_n, r].\nK is an input tensor of shape [a_1, a_2, ..., a_n, 1],\nwhere for each element, r >= K[..., 1] > 0\nOutput two outputs:\n-Flatten values tensor of shape [ \\sum_i K[i, 1] ] which contains the values of\n the top K[..., 1] elements along the last dimension\n-Flatten indices tensor of shape [ \\sum_i K[i, 1] ] which contains the indices\n of the top K[..., 1] elements, flatten indices from the input tensor).\nThese two outputs should be used with the input K, so that we know which indices\nin X are picked.\n\nGiven two equivalent values, this operator uses the indices along the last dim-\nension as a tiebreaker. That is, the element with the lower index will appear\nfirst.\n ",
+ "inputs": [
+ {
+ "description": "Tensor of shape [a_1, a_2, ..., a_n, r]",
+ "name": "X"
+ },
+ {
+ "description": "Tensor of shape [a_1, a_2, ..., a_n, 1]",
+ "name": "K"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor of shape [ \\sum_i K[i, 1] ] containing top K[..., 1] values from the input tensor",
+ "name": "Flatten values"
+ },
+ {
+ "description": "Tensor of shape [ \\sum_i K[i, 1] ] containing the indices into the flatten input",
+ "name": "Flatten indices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FlexibleTopKGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Float16ConstantFill",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "value",
+ "option": "optional"
+ },
+ {
+ "description": "The shape of the output tensor.",
+ "name": "shape",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output tensor of constant values specified by 'value'",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Float16SparseNormalize",
+ "description": "\nGiven a sparse matrix, apply max_norm or constant_norm sparse regularization.\n",
+ "attributes": [
+ {
+ "description": "A bool variable to control whether to use max norm or constant norm. When use_max_norm = false, constant norm is used so that all the embedding vectors are scaled to have a L2 norm equals to A (see blow argument norm=A). If use_max_norm = true, max norm is used so that embedding is scaled so that its l2 norm is no larger than A. If an embedding's norm is less than A originally, the embedding is left unchanged. The default is True.",
+ "name": "use_max_norm",
+ "option": "optional"
+ },
+ {
+ "description": "L2 norm of the embedding. The default is 1.0.",
+ "name": "norm",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Parameters to be normalized",
+ "name": "param"
+ },
+ {
+ "description": "Sparse indices",
+ "name": "indices"
+ },
+ {
+ "description": "Gradient computed (optional - not used, this argument is for backwards compatibility)",
+ "name": "grad"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Normalized parameters",
+ "name": "output_param"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Float16UniformFill",
+ "description": "Fills a half float tensor of a specified shape with values from a uniform distribution[min,max]",
+ "attributes": [
+ {
+ "description": "Shape of the tensor",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "Minimim value to generate",
+ "name": "min",
+ "option": "optional"
+ },
+ {
+ "description": "Maximum value to generate",
+ "name": "max",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToFused2BitFakeRowwiseQuantized",
+ "description": "\nApplies 2-bit row-wise fake quantization to a tensor of floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n",
+ "inputs": [
+ {
+ "description": "Float32 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToFused2BitRowwiseQuantized",
+ "description": "\nApplies 2-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 2-bit number between 0 and\n3. To later de-quantize values, the scale (range / 3) and zero_point\nare stored alongside the data. More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.)\n",
+ "inputs": [
+ {
+ "description": "Float32 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToFused4BitFakeRowwiseQuantized",
+ "description": "\nApplies 4-bit row-wise fake quantization to a tensor of floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n",
+ "inputs": [
+ {
+ "description": "Float32 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToFused4BitRowwiseQuantized",
+ "description": "\nApplies 4-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 4-bit number between 0 and\n15. To later de-quantize values, the scale (range / 15) and zero_point\nare stored alongside the data. More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.)\n",
+ "inputs": [
+ {
+ "description": "Float32 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToFused8BitRowwiseQuantized",
+ "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 8 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the scale.\nFor N-dimensional input tensor, the first N-1 dimensions are interpreted as\nrows and the last dimension is interpreted as a column. For example, an\ninput tensor with dimension 5x2x4 is interpreted as 10 rows and 4 columns.\n)\n",
+ "inputs": [
+ {
+ "description": "Float32 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToFused8BitRowwiseQuantizedHalfScaleBias",
+ "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 4 bytes\nof each row in the output matrix are a half float storing the scale\nfollowed by another half float containing the scale.)\n",
+ "inputs": [
+ {
+ "description": "Float32 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToFusedRandRowwiseQuantized",
+ "description": "\nApplies row-wise stochastic/random quantization by determining the range of\neach row in the input matrix, and then quantize each element to one of two\nclosest discrete levels by randomly drawing Bernoulli distribution.\nThe method is extended from TernGrad [1],\nwhich randomly quantizes gradients to three levels to reduce communication in distributed training.\nThe format of each row (x) in the output matrix is [bitwidth][tail][min][max][data]:\nbitwidth[1 Byte]: bitwidth per data [1, 2, 4 or 8];\ntail[1 Byte]: the number of unused buckets [1-8] (One byte is split to 8/bitwidth buckets and each bucket stores one low-precision data in bitwidth bits);\nmin[4 Bytes]: the minimum floating value min(x);\nmax[4 Bytes]: the maximum floating value max(x);\ndata: quantized data.\nThe quantization is uniform with levels q = min + (max-min)/(2^bitwidth - 1)*[0:1:2^bitwidth].\nDuring stochastic/random quantization x'=Quantize(x), for q_j < x_i <= q_{j+1}, we draw quantization x'_i from Bernoulli distributions with\nP(x'_i = q_{j+1}) = (x_i - q_j)/(q_{j+1} - q_j), and\nP(x'_i = q_j) = (q_{j+1} - x_i)/(q_{j+1} - q_j) where x'_i is the quantized value of x_i.\n[1] proved E{x'_i}=x_i, which is an unbiased approximation. More details are in the paper.\nFor example, suppose targeted bitwidth = 2 and x = [0.3, -1.4, -0.6, 0.9, 1.0],\nthen tail = 3, min = -1.4, max = 1.0 and q = [-1.4, -0.6, 0.2, 1.0].\nx_1 = 0.3 will be quantized to x'_1 = 0.2 with probability 7/8 and to x'_1 = 1.0 with probability 1/8.\nThe storage format of quantized data is: [x'_1|x'_3|x'_5|xxx]-[x'_2|x'_4|xxx|xxx].\nIn general, a input row is split to multiple segments. One segment is a continuous subarray of the row,\nand its length is the number of bytes storing quantized data in the output matrix.\nThe b-th bucket of the i-th byte stores the i-th data of the b-th segment of input row.\n\n[1] Wen, Wei, Cong Xu, Feng Yan, Chunpeng Wu, Yandan Wang, Yiran Chen, and Hai Li.\n\"Terngrad: Ternary gradients to reduce communication in distributed deep learning.\"\nIn Advances in Neural Information Processing Systems, pp. 1508-1518. 2017.\n\n",
+ "attributes": [
+ {
+ "description": "How many bits to quantize per data (defaults to 8).",
+ "name": "bitwidth",
+ "option": "optional"
+ },
+ {
+ "description": "random or not (True). False is set up for unittest.",
+ "name": "random",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Float32 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused bitwidth, tail, min, max and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToHalf",
+ "support_level": "default"
+ },
+ {
+ "name": "FloatToRowwiseQuantized8Bits",
+ "description": "\nThis operator applies 8Bit row-wise quantization to\ninput tensor and returns quantized tensor. Row wise quantization of\ninput tensor is the following process. We take tensor of size\n(m_1, m_2,...,m_n), n >= 2, reshape it into matrix of size\n(m_1, m_2 x... x m_n) and apply row-wise quantization. After this,\nwe compute scale_i= (min_i - max_i) / 255 and bias_i = min_i for\ni-th row r_i of reshaped matrix, where min_i and max_i -- minimum\nand maximum elements of i-th row, and quantize each element r_{ij} as\n0 <= round(r_ij - bias_i) / scale_i) < 256. Instead of input tensor\nwe obtain uint8 tensor and auxiliary information as scale and bias to\nrestore input tensor (with losses).\n",
+ "inputs": [
+ {
+ "description": "input",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "quantized_input",
+ "name": "quantized_input"
+ },
+ {
+ "description": "Matrix of floats, each row r_i of which stores a pair s_i, b_i",
+ "name": "scale_bias"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Floor",
+ "description": "\nElement-wise application of the floor function ($y=floor(x)$) to the input\ntensor `X`. Output tensor shape is the same as the input tensor. This\noperator can be used in an in-place fashion by using the same input blob as the\noutput blob.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/floor_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Floor\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.uniform(-10, 10, (5,5))).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[ 3.813361 -1.319647 5.2089314 -4.931328 0.6218652 ]\n [ 7.2757645 5.5552588 5.785643 -2.4790506 -0.41400087]\n [ 1.1541046 -6.933266 3.3754056 1.6569928 -1.7670316 ]\n [-3.4932013 4.891472 1.5530115 -3.2443287 -4.605099 ]\n [-4.574543 -7.360948 5.91305 -8.196495 -5.357458 ]]\nX after running op:\n[[ 3. -2. 5. -5. 0.]\n [ 7. 5. 5. -3. -1.]\n [ 1. -7. 3. 1. -2.]\n [-4. 4. 1. -4. -5.]\n [-5. -8. 5. -9. -6.]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Free",
+ "description": "\nFrees the content of the blobs. The input and output blobs should be\none-to-one inplace.",
+ "support_level": "default"
+ },
+ {
+ "name": "Ftrl",
+ "support_level": "default"
+ },
+ {
+ "name": "Fused2BitRowwiseQuantizedToFloat",
+ "description": "\nDe-quantizes the result of the\nFloatToFused2BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n",
+ "inputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "scale_bias_quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float32 data",
+ "name": "float_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Fused2BitRowwiseQuantizedToHalf",
+ "description": "\nDe-quantizes the result of the\nFloatToFused2BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n",
+ "inputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "scale_bias_quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float16 data",
+ "name": "float16_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Fused4BitRowwiseQuantizedToFloat",
+ "description": "\nDe-quantizes the result of the\nFloatToFused4BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n",
+ "inputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "scale_bias_quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float32 data",
+ "name": "float_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Fused4BitRowwiseQuantizedToHalf",
+ "description": "\nDe-quantizes the result of the\nFloatToFused4BitRowwiseQuantized operator. The input is expected to first have\nquantized values, then 2-byte fp16 scale and 1-byte zero_offset. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and zero_point\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n",
+ "inputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "scale_bias_quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float16 data",
+ "name": "float16_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Fused8BitRowwiseQuantizedHalfScaleBiasToFloat",
+ "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 16-bit float in the second to the last 2 bytes of each\nrow, followed by the bias as a 16-bit float in the next 2 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n",
+ "inputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "scale_bias_quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float32 data",
+ "name": "float_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Fused8BitRowwiseQuantizedHalfScaleBiasToHalfFloat",
+ "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 16-bit float in the second to the last 2 bytes of each\nrow, followed by the bias as a 16-bit float in the next 2 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n",
+ "inputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "scale_bias_quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float32 data",
+ "name": "float_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Fused8BitRowwiseQuantizedToFloat",
+ "description": "\nDe-quantizes the result of the\nFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 32-bit float in the second to the last 4 bytes of each\nrow, followed by the bias as a 32-bit float in the next 4 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n",
+ "inputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "scale_bias_quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float32 data",
+ "name": "float_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Fused8BitRowwiseQuantizedToHalfFloat",
+ "description": "\nDe-quantizes the result of the\nHalfFloatToFused8BitRowwiseQuantized operator. The input is expected to\nencode the scale as a 32-bit float in the second to the last 4 bytes of each\nrow, followed by the bias as a 32-bit float in the next 4 bytes, and the\nquantized values in the preceding bytes of the row. The output is a\nmatrix containing only the values, but de-quantized. De-quantization is\nperformed by multiplying each value by its row's scale and bias\nparameters. The de-quantized values will thus not be exactly equal to\nthe original, un-quantized floating point values.\n",
+ "inputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "scale_bias_quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float16 data",
+ "name": "float16_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "FusedRandRowwiseQuantizedToFloat",
+ "description": "\nDe-quantizes the result of the FloatToFusedRandRowwiseQuantized operator.\nRefer FloatToFusedRandRowwiseQuantized operator for details.\n",
+ "inputs": [
+ {
+ "description": "Fused bitwidth, tail, min, max and quantized data",
+ "name": "quantized_input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Float32 data",
+ "name": "float_input"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Gather",
+ "category": "Transform",
+ "description": "\n\nThe *Gather* op accepts a *DATA* tensor of rank $r >= 1$ and *INDICES* tensor of rank $q$ as inputs. It then gathers entries of the outer-most dimension of *DATA*, indexed by *INDICES*, and concatenate them in an output tensor of rank $q + (r - 1)$.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/gather_op.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/gather_op.h\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Gather\",\n [\"DATA\", \"INDICES\"],\n [\"OUTPUT\"]\n)\ndata = np.array([[1., 1.2],[2.3, 3.4],[4.5, 5.7]])\nprint(\"DATA:\\n\",data)\n\ninds = np.array([[0, 1],[1, 2]])\nprint(\"INDICES:\\n\",inds)\n\n# Feed the data and indices into the workspace\nworkspace.FeedBlob(\"DATA\", data.astype(np.float32))\nworkspace.FeedBlob(\"INDICES\", inds.astype(np.int32))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT:\\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [[1. 1.2]\n [2.3 3.4]\n [4.5 5.7]]\nINDICES:\n [[0 1]\n [1 2]]\nOUTPUT:\n [[[1. 1.2]\n [2.3 3.4]]\n\n [[2.3 3.4]\n [4.5 5.7]]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "Input data tensor of rank $r>=1$",
+ "name": "DATA"
+ },
+ {
+ "description": "Input indices tensor of rank $q$. This tensor must contain integers.",
+ "name": "INDICES"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output tensor of rank $q+(r-1)$",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GatherByKey",
+ "description": "\nInverse operation of Partition.\n\nTakes the original, full 'keys' tensor followed by sharded value tensors,\nand returns the full value tensor, combined using the same hash used in\nPartition.\n",
+ "inputs": [
+ {
+ "description": "The first input is the full keys tensor (same as the first input of Partition).",
+ "name": "keys"
+ },
+ {
+ "description": "Subsequented inputs are sharded values tensors.",
+ "name": "sharded_values"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Reconstructed values tensor.",
+ "name": "values"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GatherFused8BitRowwise",
+ "description": "\nPerform the same operation as Gather, but operating on 8-bit rowwise quantized\nmatrices with fused storage (where each row stores quantized values, and then\nthe scale and offset).\nDATA needs to have rank 2 and INDICES needs to have rank 1.\n",
+ "inputs": [
+ {
+ "description": "uint8 tensor with rank 2 obtained with operator FloatToFused8BitRowwiseQuantized",
+ "name": "DATA"
+ },
+ {
+ "description": "Integer vector containing indices of the first dimension of DATA forthe rows that are being gathered",
+ "name": "INDICES"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "output",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GatherPadding",
+ "description": "\nGather the sum of start and end paddings in a padded input sequence. Used in\norder to compute the gradients of AddPadding w.r.t the padding tensors.\n",
+ "attributes": [
+ {
+ "description": "Outer-size of padding present around each range.",
+ "name": "padding_width",
+ "option": "optional"
+ },
+ {
+ "description": "(Optional) Specifies a different end-padding width.",
+ "name": "end_padding_width",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "T Padded input data",
+ "name": "data_in"
+ },
+ {
+ "description": "(i64) Num of elements in each range. sum(lengths) = N. If not provided, considers all data as a single segment.",
+ "name": "lengths"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Sum of all start paddings, or of all paddings if end_padding_sum is not provided.",
+ "name": "padding_sum"
+ },
+ {
+ "description": "T Sum of all end paddings, if provided.",
+ "name": "end_padding_sum"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GatherRanges",
+ "description": "\nGiven DATA tensor of rank 1, and RANGES tensor of rank 3, gather\ncorresponding ranges into a 1-D tensor OUTPUT.\n\nRANGES dimentions description:\n1: represents list of examples within a batch\n2: represents list features\n3: two values which are start and length or a range (to be applied on DATA)\n\nAnother output LENGTHS represents each example length within OUTPUT\n\nExample:\n DATA = [1, 2, 3, 4, 5, 6]\n RANGES = [\n [\n [0, 1],\n [2, 2],\n ],\n [\n [4, 1],\n [5, 1],\n ]\n ]\n OUTPUT = [1, 3, 4, 5, 6]\n LENGTHS = [3, 2]\n",
+ "inputs": [
+ {
+ "description": "Tensor of rank 1.",
+ "name": "DATA"
+ },
+ {
+ "description": "Tensor of int32/int64 ranges, of dims (N, M, 2). Where N is number of examples and M is a size of each example. Last dimension represents a range in the format (start, lengths)",
+ "name": "RANGES"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1-D tensor of size sum of range lengths",
+ "name": "OUTPUT"
+ },
+ {
+ "description": "1-D tensor of size N with lengths over gathered data for each row in a batch. sum(LENGTHS) == OUTPUT.size()",
+ "name": "LENGTHS"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GatherRangesToDense",
+ "description": "\nGiven DATA tensor of rank 1, and RANGES tensor of rank 3, gather values\ncorresponding to each range into a separate output tensor. If the optional input\nKEY tensor is also given, the output will be sorted by KEY for each example.\n\nRANGES dimensions description:\n1: represents list of examples within a batch\n2: represents list features\n3: two values which are start and length or a range (to be applied on DATA)\n\nEach feature has fixed lengths which are passed as lengths argument and a\nseparate tensor will be produced for each feature.\ni.e. DATA.dim(1) = len(lengths) = NumOuptuts.\n\nMissing features (represented by empty ranges) filled with default_value.\n\nExample 1:\n DATA = [1, 2, 3, 4, 5, 6, 7, 8]\n RANGES = [\n [\n [2, 4],\n [0, 2],\n ],\n [\n [0, 0],\n [6, 2],\n ]\n ]\n lengths = [4, 2]\n OUTPUT[0] = [[3, 4, 5, 6], [0, 0, 0, 0]]\n OUTPUT[1] = [[1, 2], [7, 8]]\n\nExample 2 (with KEY):\nDATA = [1, 2, 3, 4, 5, 6, 7, 8]\nKEY = [0, 1, 3, 2, 1, 0, 1, 0]\nRANGES = [\n [\n [2, 4],\n [0, 2],\n ],\n [\n [0, 0],\n [6, 2],\n ]\n]\nlengths = [4, 2]\nOUTPUT[0] = [[6, 5, 4, 3], [0, 0, 0, 0]]\nOUTPUT[1] = [[1, 2], [8, 7]]\n\nContrast Example 2 with Example 1. For each data point per feature, the values\nare sorted by the corresponding KEY.\n",
+ "attributes": [
+ {
+ "description": "Expected lengths for ranges",
+ "name": "lengths",
+ "option": "optional"
+ },
+ {
+ "description": "The number of observations needed before deciding that the ratio of mismatched ranges is alarming, also determines whether an info sumarizing the empty and mismatch ratio will be printed at the end.",
+ "name": "min_observation",
+ "option": "optional"
+ },
+ {
+ "description": "An error is raised when ratio of empty ranges exceeds this (default is 1, which means by default no error will be triggered).",
+ "name": "max_empty_ratio",
+ "option": "optional"
+ },
+ {
+ "description": "An error is raised when ratio of mismatched ranges exceeds this.",
+ "name": "max_mismatched_ratio",
+ "option": "optional"
+ },
+ {
+ "description": "A log is recorded only after an error is triggered every n times.",
+ "name": "log_every_n",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Tensor of rank 1.",
+ "name": "DATA"
+ },
+ {
+ "description": "Tensor of int32/int64 ranges, of dims (N, M, 2). Where N is number of examples and M is a size of each example. Last dimension represents a range in the format (start, lengths)",
+ "name": "RANGES"
+ },
+ {
+ "description": "Tensor of rank 1 and type int64.",
+ "name": "KEY"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1-D tensor of size sum of range lengths",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GaussianFill",
+ "description": "\nThis op fills an output tensor with samples drawn from a normal distribution specified by the mean and standard deviation arguments. The output tensor shape is specified by the *shape* argument. However, if *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\n*Note: cannot set the shape argument and pass in an input at the same time.*\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GaussianFill\",\n [],\n [\"out\"],\n shape=[3,3],\n mean=2.0,\n std=1.1\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [[1.2084167 2.3336504 2.827349 ]\n [2.7108908 0.9374752 1.7173369 ]\n [0.03320992 2.1775863 1.0894578 ]]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0.0,
+ "description": "Mean of the distribution to draw from.",
+ "name": "mean",
+ "option": "optional",
+ "type": "float32"
+ },
+ {
+ "default": 1.0,
+ "description": "Standard deviation of the distribution to draw from.",
+ "name": "std",
+ "option": "optional",
+ "type": "float32"
+ },
+ {
+ "description": "Desired shape of the *output* tensor.",
+ "name": "shape",
+ "option": "optional",
+ "type": "int64[]"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the *shape* indicated by the input blob. Cannot set the *extra_shape* argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional",
+ "type": "int64[]"
+ },
+ {
+ "default": false,
+ "description": "set to *True* to use the *input* as shape. First, input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional",
+ "type": "boolean"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "(Optional) 1D tensor specifying the shape of the output. Must be used with *input_as_shape=True*",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output tensor of random values drawn from a normal distribution. If the shape argument is set, this is the shape specified, and if the *input* exists and *input_as_shape=True*, it is the shape specified by the *input* tensor.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GE",
+ "description": "\nPerforms element-wise greater or equal than comparison **>=** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GE\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True True False True True False]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Gelu",
+ "description": "\nRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = xP(X <= x) where X ~ N(0, 1),\nis applied to the tensor elementwise.\n",
+ "attributes": [
+ {
+ "description": "If true, use y = 0.5x * (1 + tanh(sqrt(2/Pi) * (x + 0.044715x^3))).",
+ "name": "fast_gelu",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1D input tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D input tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GeluGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "GenerateProposals",
+ "description": "\nGenerate bounding box proposals for Faster RCNN. The propoasls are generated for\na list of images based on image score 'score', bounding box regression result\n'deltas' as well as predefined bounding box shapes 'anchors'. Greedy\nnon-maximum suppression is applied to generate the final bounding boxes.\n",
+ "attributes": [
+ {
+ "description": "(float) spatial scale",
+ "name": "spatial_scale",
+ "option": "optional"
+ },
+ {
+ "description": "(int) RPN_PRE_NMS_TOP_N",
+ "name": "pre_nms_topN",
+ "option": "optional"
+ },
+ {
+ "description": "(int) RPN_POST_NMS_TOP_N",
+ "name": "post_nms_topN",
+ "option": "optional"
+ },
+ {
+ "description": "(float) RPN_NMS_THRESH",
+ "name": "nms_thresh",
+ "option": "optional"
+ },
+ {
+ "description": "(float) RPN_MIN_SIZE",
+ "name": "min_size",
+ "option": "optional"
+ },
+ {
+ "description": "bool (default false), Correct bounding box transform coordates, see bbox_transform() in boxes.py Set to true to match the detectron code, set to false for backward compatibility",
+ "name": "correct_transform_coords",
+ "option": "optional"
+ },
+ {
+ "description": "bool (default true). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].",
+ "name": "angle_bound_on",
+ "option": "optional"
+ },
+ {
+ "description": "int (default -90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].",
+ "name": "angle_bound_lo",
+ "option": "optional"
+ },
+ {
+ "description": "int (default 90 degrees). If set, for rotated boxes, angle is normalized to be within [angle_bound_lo, angle_bound_hi].",
+ "name": "angle_bound_hi",
+ "option": "optional"
+ },
+ {
+ "description": "float (default 1.0 degrees). For RRPN, clip almost horizontal boxes within this threshold of tolerance for backward compatibility. Set to negative value for no clipping.",
+ "name": "clip_angle_thresh",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Scores from conv layer, size (img_count, A, H, W)",
+ "name": "scores"
+ },
+ {
+ "description": "Bounding box deltas from conv layer, size (img_count, 4 * A, H, W)",
+ "name": "bbox_deltas"
+ },
+ {
+ "description": "Image info, size (img_count, 3), format (height, width, scale)",
+ "name": "im_info"
+ },
+ {
+ "description": "Bounding box anchors, size (A, 4)",
+ "name": "anchors"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Proposals, size (n x 5), format (image_index, x1, y1, x2, y2)",
+ "name": "rois"
+ },
+ {
+ "description": "scores of proposals, size (n)",
+ "name": "rois_probs"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GenerateProposalsCPP",
+ "support_level": "default"
+ },
+ {
+ "name": "GetAllBlobNames",
+ "description": "\nReturn a 1D tensor of strings containing the names\nof each blob in the active workspace.\n",
+ "attributes": [
+ {
+ "description": "(bool, default true) Whether to include blobs inherited from parent workspaces.",
+ "name": "include_shared",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D tensor of strings containing blob names.",
+ "name": "blob_names"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GetCursorOffset",
+ "description": "Get the current offset in the cursor.",
+ "inputs": [
+ {
+ "description": "A blob containing a pointer to the cursor.",
+ "name": "cursor"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor containing the offsets for the cursor.",
+ "name": "offsets"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GFtrl",
+ "support_level": "default"
+ },
+ {
+ "name": "GivenTensorBoolFill",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "values"
+ },
+ {
+ "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape. First input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GivenTensorByteStringToUInt8Fill",
+ "description": "\nThis op fills a uint8 output tensor with the data specified by the *value* argument. The data must previously be serialized as a byte string. The output tensor shape is specified by the *shape* argument. Beware, when using this argument *value* should have a value for every element of the *output*, as missing values will not be initialized automatically. If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\nThis op allows us to write uint8 tensors to Protobuf as byte strings and read them back as uint8 tensors in order to avoid the Protobuf uint32_t varint encoding size penalty.\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nval = np.array([1, 2, 3], dtype=np.uint8)\nop = core.CreateOperator(\n \"GivenTensorByteStringToUInt8Fill\",\n [],\n [\"out\"],\n values=[val.tobytes()],\n shape=val.shape,\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [1 2 3]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "values"
+ },
+ {
+ "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape. First input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GivenTensorDoubleFill",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "values"
+ },
+ {
+ "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape. First input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GivenTensorFill",
+ "description": "\nThis op fills an output tensor with the data specified by the *value* and *dtype* arguments. The output tensor shape is specified by the *shape* argument. Beware, when using this argument *value* should have a value for every element of the *output*, as missing values will not be initialized automatically. If *input_as_shape* is set to *true*, then the *input* should be a 1D tensor containing the desired output shape (the dimensions specified in *extra_shape* will also be appended). In this case, the *shape* argument should **not** be set.\n\n*Note: Do not set the shape argument and pass in an input at the same time.*\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/given_tensor_fill_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/given_tensor_fill_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GivenTensorFill\",\n [],\n [\"out\"],\n values=[1., 2., 3.],\n shape=[3],\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"Out:\\n\", workspace.FetchBlob(\"out\"))\n\n```\n\n**Result**\n\n```\n\nOut:\n [1. 2. 3.]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "The value of the elements to go in the *output* tensor.",
+ "name": "values"
+ },
+ {
+ "description": "The data type for the elements of the output tensor. Strictly must be one of the types from DataType enum in TensorProto.",
+ "name": "dtype",
+ "option": "optional"
+ },
+ {
+ "description": "Desired shape of the *output* tensor.",
+ "name": "shape",
+ "option": "optional",
+ "type": "int64[]"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the *shape* indicated by the input blob. Cannot set the *extra_shape* argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional",
+ "type": "int64[]"
+ },
+ {
+ "default": false,
+ "description": "set to *True* to use the *input* as shape. First, input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional",
+ "type": "boolean"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "(Optional) 1D tensor specifying the shape of the output. Must be used with *input_as_shape=True*",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output tensor with desired dimension filled with specified data. If the shape argument is set, this is the shape specified, and if the *input* exists and *input_as_shape=True*, it is the shape specified by the *input* tensor.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GivenTensorInt16Fill",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "values"
+ },
+ {
+ "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape. First input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GivenTensorInt64Fill",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "values"
+ },
+ {
+ "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape. First input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GivenTensorIntFill",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "values"
+ },
+ {
+ "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape. First input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GivenTensorStringFill",
+ "attributes": [
+ {
+ "description": "The value for the elements of the output tensor.",
+ "name": "values"
+ },
+ {
+ "description": "The shape of the output tensor.Cannot set the shape argument and pass in an input at the same time.",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "The additional dimensions appended at the end of the shape indicatedby the input blob.Cannot set the extra_shape argument when there is no input blob.",
+ "name": "extra_shape",
+ "option": "optional"
+ },
+ {
+ "description": "1D tensor containing the desired output shape. First input must be in CPU context.",
+ "name": "input_as_shape",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Glu",
+ "description": "\nApplies gated linear unit to the input Tensor X. The output Y is half the size\nof the input X, so if the shape of X is [d1, d2, ..., N] shape of Y will be\n[d1, d2, ..., dn/2] and Y(:dn-1, i) = GLU(X(:dn-1, i), X(:dn-1, i+N/2)) =\nX(dn-1, i) * sigmoid(X(dn-1, i+N/2))\n",
+ "inputs": [
+ {
+ "description": "1D input tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D output tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GroupNorm",
+ "description": "\nGroup Normalization (GN) operation: https://arxiv.org/abs/1803.08494\n",
+ "attributes": [
+ {
+ "description": "(int) default 32; number of groups used by GN.",
+ "name": "num_groups",
+ "option": "optional"
+ },
+ {
+ "description": "(float) default 1e-5; small constant added to var.",
+ "name": "epsilon",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": ">=4D feature map input of shape (N, C, H, W) or (N, C, T, H, W)",
+ "name": "X"
+ },
+ {
+ "description": "The scale as a 1-dimensional tensor of size C to be applied to the output.",
+ "name": "gamma"
+ },
+ {
+ "description": "The bias as a 1-dimensional tensor of size C to be applied to the output.",
+ "name": "beta"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The output >=4-dimensional tensor of the same shape as X.",
+ "name": "Y"
+ },
+ {
+ "description": "The mean of shape (N, G). For backward usage or reference. Cannot be used as activations.",
+ "name": "mean"
+ },
+ {
+ "description": "The std of shape (N, G). For backward usage or reference. Cannot be used as activations.",
+ "name": "std"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GroupNormGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "GRUUnit",
+ "description": "\nGRUUnit computes the activations of a standard GRU,\nin a sequence-length aware fashion.\n\nConcretely, given the (fused) inputs X (TxNxD), the previous hidden\nstate (NxD), and the sequence lengths (N), computes the GRU\nactivations, avoiding computation if the input is invalid (as in, the\nvalue at X[t][n] >= seqLengths[n].\n\n",
+ "attributes": [
+ {
+ "description": "Bool to determine if hidden state is zeroes or passed along for timesteps past the given sequence_length.",
+ "name": "drop_states",
+ "option": "optional"
+ },
+ {
+ "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.",
+ "name": "sequence_lengths",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The new GRU hidden state calculated by this op.",
+ "name": "hidden"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GRUUnitGradient",
+ "attributes": [
+ {
+ "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.",
+ "name": "sequence_lengths",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "GT",
+ "description": "\nPerforms element-wise greater than comparison **>** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"GT\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [False True False False False False]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HalfFloatToFused8BitRowwiseQuantized",
+ "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 8 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the scale.)\n",
+ "inputs": [
+ {
+ "description": "Float16 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HalfFloatToFused8BitRowwiseQuantizedHalfScaleBias",
+ "description": "\nApplies 8-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 8-bit number between 0 and\n255. To later de-quantize values, the scale (range / 255) and offset\n(bias) are stored alongside the data. More precisely, each row contains\nint8 elements for each quantized element, and the last 4 bytes\nof each row in the output matrix are a float storing the scale\nfollowed by another float containing the scale.)\n",
+ "inputs": [
+ {
+ "description": "Float16 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HalfToFloat",
+ "support_level": "default"
+ },
+ {
+ "name": "HalfToFused2BitFakeRowwiseQuantized",
+ "description": "\nApplies 2-bit row-wise fake quantization to a tensor of half floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n",
+ "inputs": [
+ {
+ "description": "Float16 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HalfToFused2BitRowwiseQuantized",
+ "description": "\nApplies 2-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 2-bit number between 0 and\n3. To later de-quantize values, the scale (range / 3) and zero_point\nare stored alongside the data. More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.)\n",
+ "inputs": [
+ {
+ "description": "Float16 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HalfToFused4BitFakeRowwiseQuantized",
+ "description": "\nApplies 4-bit row-wise fake quantization to a tensor of half floats.\nThe output looks like an int8 rowwise quantized blob with\nscale and biases in half float.\n",
+ "inputs": [
+ {
+ "description": "Float16 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HalfToFused4BitRowwiseQuantized",
+ "description": "\nApplies 4-bit row-wise quantization by determining the range\n(maximum - minimum) and offset (minimum value) of each row in the input\nmatrix, and then scaling each element to an 4-bit number between 0 and\n15. To later de-quantize values, the scale (range / 15) and zero_point\nare stored alongside the data. More precisely, each row first has quantized\nvalues, and then 2-byte fp16 scale and 2-byte zero_offset.)\n",
+ "inputs": [
+ {
+ "description": "Float16 input data",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Fused scale, bias and quantized data",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HardSigmoid",
+ "description": "\nApplies hard sigmoid operation to the input data element-wise.\nThe HardSigmoid operation takes one input $X$, produces one output $Y$, and is defined as:\n\n$$Y = max(0,min(1,x * alpha + beta))$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/hard_sigmoid_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/hard_sigmoid_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"HardSigmoid\",\n [\"X\"],\n [\"Y\"],\n alpha = 0.2,\n beta = 0.5,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(5).astype(np.float32))\nprint(\"input:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"sigmoid:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\ninput: [ 1.5744036 0.31632107 1.7842269 1.4450722 -2.1726978 ]\nhard_sigmoid: [ 0.81488073, 0.56326419, 0.85684538, 0.78901446, 0.06546044]\n\n```\n\n \n\n\n",
+ "attributes": [
+ {
+ "description": "float: the slope of the function. Defaults to 0.2",
+ "name": "alpha",
+ "option": "optional"
+ },
+ {
+ "description": "float: the bias value of the function. Defaults to 0.5",
+ "name": "beta",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1D input tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D output tensor with same shape as input",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HardSigmoidGradient",
+ "description": "\nHardSigmoidGradient takes both Y and dY as well as an argument alpha and uses\nthis to update dX according to the chain rule and derivatives of the hard\nsigmoid function.\n",
+ "support_level": "default"
+ },
+ {
+ "name": "HasElements",
+ "description": "\nThe *HasElements* op accepts a single or multiple input tensors, and produces a single boolean output $has\\_elements$. The output is *True* if and only if any of the input tensor has size > 0. Note, this op is the opposite of the *IsEmpty* op.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"HasElements\",\n [\"tensor\"],\n [\"has_elements\"],\n)\n\n// Use a not-empty tensor\nworkspace.FeedBlob(\"tensor\", np.random.randn(2, 2).astype(np.float32))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"has_elements: \", workspace.FetchBlob(\"has_elements\"),\"\\n\")\n\n// Use an empty tensor\nworkspace.FeedBlob(\"tensor\", np.empty(0))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"has_elements: \", workspace.FetchBlob(\"has_elements\"))\n\n```\n\n**Result**\n\n```\n\ntensor:\n [[ 0.6116506 -0.54433197]\n [ 0.19406661 -0.7338629 ]]\nhas_elements: True\n\ntensor:\n []\nhas_elements: False\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "Input data tensor to check for elements.",
+ "name": "tensor"
+ },
+ {
+ "description": "List of input data tensors to check for elements.",
+ "name": "X1, X2, ..."
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output scalar boolean tensor. True if input has size > 0.",
+ "name": "has_elements"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HasScope",
+ "description": "\nChecks whether scope blob has any saved scopes left\n ",
+ "support_level": "default"
+ },
+ {
+ "name": "HeatmapMaxKeypoint",
+ "support_level": "default"
+ },
+ {
+ "name": "Histogram",
+ "description": "\n Computes a histogram for values in the given list of tensors.\n For logging activation histograms for post-hoc analyses, consider using the\n HistogramObserver observer.\n For iteratively computing a histogram for all input tensors encountered through\n history, consider using the AccumulateHistogram operator.\n ",
+ "attributes": [
+ {
+ "description": "length-(k + 1) sequence of float values wherein the i-th element represents the inclusive left boundary of the i-th bin for i in [0, k - 1] and the exclusive right boundary of the (i-1)-th bin for i in [1, k].",
+ "name": "bin_edges",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* List of input tensors.",
+ "name": "X1, X2, ..."
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D tensor of length k, wherein the i-th element expresses the count of tensor values that fall within range [bin_edges[i], bin_edges[i + 1])",
+ "name": "histogram"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HSoftmax",
+ "description": "\nHierarchical softmax is an operator which approximates the softmax operator\nwhile giving significant training speed gains and reasonably comparable\nperformance. In this operator, instead of calculating the probabilities of all\nthe classes, we calculate the probability of each step in the path from root to\nthe target word in the hierarchy.\n\nThe operator takes a 2-D tensor (Tensor) containing a batch of layers, a\nset of parameters represented by the weight matrix and bias terms, and a 1-D\ntensor (Tensor) holding labels, or the indices of the target class. The\nhierarchy has to be specified as an argument to the operator.\n\nThe operator returns a 1-D tensor holding the computed log probability of the\ntarget class and a 2-D tensor of intermediate outputs (from the weight matrix\nand softmax from each step in the path from root to target class) which will be\nused by the gradient operator to compute gradients for all samples in the batch.\n",
+ "attributes": [
+ {
+ "description": "Serialized HierarchyProto string containing list of vocabulary words and their paths from root of hierarchy to the leaf",
+ "name": "hierarchy",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data from previous layer",
+ "name": "X"
+ },
+ {
+ "description": "2D blob containing 'stacked' fully connected weight matrices. Each node in the hierarchy contributes one FC weight matrix if it has children nodes. Dimension is N*D, D is input dimension of data (X), N is sum of all output dimensions, or total number of nodes (excl root)",
+ "name": "W"
+ },
+ {
+ "description": "1D blob with N parameters",
+ "name": "b"
+ },
+ {
+ "description": "int word_id of the target word",
+ "name": "labels"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1-D of log probability outputs, one per sample",
+ "name": "Y"
+ },
+ {
+ "description": "Extra blob to store the intermediate FC and softmax outputs for each node in the hierarchical path of a word. The outputs from samples are stored in consecutive blocks in the forward pass and are used in reverse order in the backward gradientOp pass",
+ "name": "intermediate_output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HSoftmaxGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "HSoftmaxSearch",
+ "description": "\nHSoftmaxSearch is an operator to generate the most possible paths given a\nwell-trained model and input vector. Greedy algorithm is used for pruning the\nsearch tree.\n",
+ "attributes": [
+ {
+ "description": "Serialized TreeProto string containing a tree including all intermidate nodes and leafs. All nodes must have names for correct outputs",
+ "name": "tree",
+ "option": "optional"
+ },
+ {
+ "description": "beam used for pruning tree. The pruning algorithm is that only children, whose score is smaller than parent's score puls beam, will be propagated.",
+ "name": "beam",
+ "option": "optional"
+ },
+ {
+ "description": "Number of nodes in outputs",
+ "name": "topN",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data from previous layer",
+ "name": "X"
+ },
+ {
+ "description": "The matrix trained from Softmax Ops",
+ "name": "W"
+ },
+ {
+ "description": "The bias trained from Softmax Ops",
+ "name": "b"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The name of selected nodes and leafs. For nodes, it will be the name defined in the tree. For leafs, it will be the index of the word in the tree.",
+ "name": "Y_names"
+ },
+ {
+ "description": "The corresponding scores of Y_names",
+ "name": "Y_scores"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "HuffmanTreeHierarchy",
+ "description": "\nHuffmanTreeHierarchy is an operator to generate huffman tree hierarchy given\nthe input labels. It returns the tree as serialized HierarchyProto\n",
+ "attributes": [
+ {
+ "description": "The number of classes used to build the hierarchy.",
+ "name": "num_classes",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The labels vector",
+ "name": "Labels"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Huffman coding hierarchy of the labels",
+ "name": "Hierarch"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "If",
+ "description": "\n'If' control operator, first input is a scalar boolean blob that stores condition\nvalue. Accepts 'then_net' (required) and 'else_net' (optional) arguments for 'then' and\n'else' subnets respectively. Subnets are executed in the same workspace as 'If'.\n ",
+ "attributes": [
+ {
+ "description": "Net executed when condition is true",
+ "name": "then_net",
+ "option": "optional"
+ },
+ {
+ "description": "Net executed when condition is false (optional)",
+ "name": "else_net",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Scalar boolean condition",
+ "name": "condition"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Im2Col",
+ "description": "The Im2Col operator from Matlab.",
+ "inputs": [
+ {
+ "description": "4-tensor in NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "4-tensor. For NCHW: N x (C x kH x kW) x outH x outW.For NHWC: N x outH x outW x (kH x kW x C",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IncrementPut",
+ "description": "\n Consume a value and pushes it to the global stat registry as an sum.\n\n Github Links:\n - https://github.com/pytorch/pytorch/blob/master/caffe2/operators/stats_put_ops.cc\n\n ",
+ "attributes": [
+ {
+ "description": "(*str*): name of the stat. If not present, then uses name of input blob",
+ "name": "name",
+ "option": "optional"
+ },
+ {
+ "description": "(*int64_t*): number to multiply input values by (used when inputting floats, as stats can only receive integers",
+ "name": "magnitude_expand",
+ "option": "optional"
+ },
+ {
+ "description": "(*boolean*): whether or not to clamp inputs to the max inputs allowed",
+ "name": "bound",
+ "option": "optional"
+ },
+ {
+ "description": "(*float*): Optionally provide a default value for receiving empty tensors",
+ "name": "default_value",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "(*Tensor``*): A scalar tensor, representing any numeric value",
+ "name": "value"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IndexFreeze",
+ "description": "\nFreezes the given index, disallowing creation of new index entries.\nShould not be called concurrently with IndexGet.\n",
+ "inputs": [
+ {
+ "description": "Pointer to an Index instance.",
+ "name": "handle"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The input handle.",
+ "name": "handle"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IndexGet",
+ "description": "\nGiven an index handle and a tensor of keys, return an Int tensor of same shape\ncontaining the indices for each of the keys. If the index is frozen, unknown\nentries are given index 0. Otherwise, new entries are added into the index.\nIf an insert is necessary but max_elements has been reached, fail.\n",
+ "inputs": [
+ {
+ "description": "Pointer to an Index instance.",
+ "name": "handle"
+ },
+ {
+ "description": "Tensor of keys to be looked up.",
+ "name": "keys"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Indices for each of the keys.",
+ "name": "indices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IndexHash",
+ "description": "\nThis operator translates a list of indices into a list of hashed indices.\nA seed can be fed as an argument to change the behavior of the hash function.\nIf a modulo is specified, all the hashed indices will be modulo the\nspecified number. All input and output indices are enforced to be positive.\n",
+ "attributes": [
+ {
+ "description": "seed for the hash function",
+ "name": "seed",
+ "option": "optional"
+ },
+ {
+ "description": "must be > 0, hashed ids will be modulo this number",
+ "name": "modulo",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input feature indices.",
+ "name": "Indices"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Hashed feature indices.",
+ "name": "HashedIndices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IndexLoad",
+ "description": "\nLoads the index from the given 1-D tensor. Elements in the tensor will be given\nconsecutive indexes starting at 1. Fails if tensor contains repeated elements.\n",
+ "attributes": [
+ {
+ "description": "If set, skips the first entry of the tensor. This allows to load tensors that are aligned with an embedding, where the first entry corresponds to the default 0 index entry.",
+ "name": "skip_first_entry",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Pointer to an Index instance.",
+ "name": "handle"
+ },
+ {
+ "description": "1-D tensor with elements starting with index 1.",
+ "name": "items"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The input handle.",
+ "name": "handle"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IndexSize",
+ "description": "\nReturns the number of entries currently present in the index.\n",
+ "inputs": [
+ {
+ "description": "Pointer to an Index instance.",
+ "name": "handle"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Scalar int64 tensor with number of entries.",
+ "name": "items"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IndexStore",
+ "description": "\nStores the keys of this index in a 1-D tensor. Since element 0 is reserved\nfor unknowns, the first element of the output tensor will be element of index 1.\n",
+ "inputs": [
+ {
+ "description": "Pointer to an Index instance.",
+ "name": "handle"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1-D tensor with elements starting with index 1.",
+ "name": "items"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "InferenceLSTM",
+ "attributes": [
+ {
+ "description": "(*long*): number of layers in the lstm stack",
+ "name": "num_layers",
+ "option": "optional"
+ },
+ {
+ "description": "(*bool*): whether the cells have biases or not",
+ "name": "has_biases",
+ "option": "optional"
+ },
+ {
+ "description": "(*bool*): whether the batch is at dim 0",
+ "name": "batch_first",
+ "option": "optional"
+ },
+ {
+ "description": "(*bool*): if bidirectional",
+ "name": "bidirectional",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "the output of the last layer of lstm",
+ "name": "output"
+ },
+ {
+ "description": "hidden state at t = seq_len",
+ "name": "hidden"
+ },
+ {
+ "description": "cell state at t = seq_len",
+ "name": "cell"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "InstanceNorm",
+ "description": "\nThe *InstanceNorm* op applies Instance Normalization over a 4D input as described in [Instance Normalization: The Missing Ingredient for Fast Stylization](https://arxiv.org/abs/1607.08022).\n\n$$output = \\frac{input-\\mu_{input}}{\\sqrt{\\sigma_{input}^2} + \\epsilon}*scale + bias$$\n\nNotice, two of the outputs are optional so there are three output cases for this op. Case 1: output; Case 2: output, saved_mean; Case 3: output, saved_mean, saved_inv_stdev.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/instance_norm_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/instance_norm_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"InstanceNorm\",\n [\"input\", \"scale\", \"bias\"],\n [\"output\"],\n epsilon=1e-5,\n)\n\nworkspace.FeedBlob(\"input\", np.random.randn(2, 1, 3, 3).astype(np.float32))\nprint(\"input:\\n\", workspace.FetchBlob(\"input\"), \"\\n\")\n\nworkspace.FeedBlob(\"scale\", np.array([1.5]).astype(np.float32))\nprint(\"scale: \", workspace.FetchBlob(\"scale\"))\n\nworkspace.FeedBlob(\"bias\", np.array([1.]).astype(np.float32))\nprint(\"bias: \", workspace.FetchBlob(\"bias\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"output:\\n\", workspace.FetchBlob(\"output\"))\n\n```\n\n**Result**\n\n```\n\ninput:\n [[[[ 0.97856593 -1.1832817 -0.2540021 ]\n [-1.3315694 -0.7485018 0.3787225 ]\n [-0.6826597 -1.4637762 0.57116514]]]\n\n\n [[[-0.44948956 0.85544354 -0.9315333 ]\n [-0.37202677 -0.22266895 -0.27194235]\n [ 0.4948163 -0.7296504 1.3393803 ]]]]\n\nscale: [1.5]\nbias: [1.]\noutput:\n [[[[ 3.5017493 -0.3791256 1.2890853 ]\n [-0.6453266 0.40137637 2.4249308 ]\n [ 0.5195738 -0.8826599 2.7703972 ]]]\n\n\n [[[ 0.12639964 2.856744 -0.8821926 ]\n [ 0.28847694 0.60098207 0.49788612]\n [ 2.1021945 -0.45978796 3.869297 ]]]]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 1e-05,
+ "description": "The epsilon value to use to avoid division by zero.",
+ "name": "epsilon",
+ "option": "optional",
+ "type": "float32"
+ },
+ {
+ "default": "NCHW",
+ "description": "Specifies the order of the input data blob, where $N$ is batch size, $C$ is number of channels, $H$ is spatial height, and $W$ is spatial width. The only other valid option is \"NHWC\".",
+ "name": "order",
+ "option": "optional",
+ "type": "string"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The input 4-dimensional NCHW tensor to be operated on.",
+ "name": "input"
+ },
+ {
+ "description": "The input 1-dimensional scale tensor of size *C*.",
+ "name": "scale"
+ },
+ {
+ "description": "The input 1-dimensional bias tensor of size *C*.",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The output 4-dimensional tensor of the same shape as input.",
+ "name": "output"
+ },
+ {
+ "description": "(Optional) Saved mean used during training to speed up gradient computation. Should not be used for testing.",
+ "name": "saved_mean"
+ },
+ {
+ "description": "(Optional) Saved inverse stdev used during training to speed up gradient computation. Should not be used for testing.",
+ "name": "saved_inv_stdev"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "InstanceNormGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Int8Add",
+ "description": "\n Performs element-wise binary Add (with no broadcast support).\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "Second operand. It should be of the same size as A.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Result, has same dimensions and type as A",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8AddRelu",
+ "description": "\n Performs element-wise binary Add (with no broadcast support). \"\n \"Output will go through rectified linear \"\n \"function, where y = max(0, x).\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "Second operand. It should be of the same size as A.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Result, has same dimensions and type as A",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8AveragePool",
+ "category": "Pool",
+ "description": "AveragePool \nconsumes an input blob X and applies average pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Average pooling consisting of averaging all values of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "order"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data tensor from average pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8AveragePoolRelu",
+ "description": "AveragePool \nconsumes an input blob X and applies average pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Average pooling consisting of averaging all values of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data tensor from average pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Output will go through rectified linear function, where y = max(0, x).",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8ChannelShuffle",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8Concat",
+ "description": "Concatenate a list of tensors into a single tensor",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ },
+ {
+ "description": "Which axis to concat on",
+ "name": "axis",
+ "option": "optional"
+ },
+ {
+ "description": "Pass 1 to add the axis specified in arg 'axis' to all input tensors",
+ "name": "add_axis",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Concatenated tensor",
+ "name": "concat_result"
+ },
+ {
+ "description": "The dimensions of the inputs.",
+ "name": "split_info"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8Conv",
+ "category": "Layer",
+ "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \n[Only NHWC order is supported now]Note that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is convolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nconv_op_impl.h is the templated implementation of the conv_op.h file, which is\nwhy they are separate files.\n",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "order"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "name": "pad"
+ },
+ {
+ "default": 1,
+ "name": "stride"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data blob from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the NCHW usage. On the other hand, the NHWC Op has a different set of dimension constraints. ",
+ "name": "X"
+ },
+ {
+ "description": "The filter blob that will be used in the convolutions; has size (M x C x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel.",
+ "name": "filter"
+ },
+ {
+ "description": "The 1D bias blob that is added through the convolution; has size (M).",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8ConvRelu",
+ "description": "\nThe convolution operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \n[Only NHWC order is supported now]Note that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is convolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nconv_op_impl.h is the templated implementation of the conv_op.h file, which is\nwhy they are separate files.\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data blob from previous layer; has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that this is for the NCHW usage. On the other hand, the NHWC Op has a different set of dimension constraints. ",
+ "name": "X"
+ },
+ {
+ "description": "The filter blob that will be used in the convolutions; has size (M x C x kH x kW), where C is the number of channels, and kH and kW are the height and width of the kernel.",
+ "name": "filter"
+ },
+ {
+ "description": "The 1D bias blob that is added through the convolution; has size (M).",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob that contains the result of the convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths. Output will go through rectified linear function, where y = max(0, x).",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8ConvTranspose",
+ "description": "\nThe transposed convolution consumes an input vector, the filter blob, and\nthe bias blob, and computes the output. Note that other parameters, such as\nthe stride and kernel size, or the pads' sizes in each direction are not\nnecessary for input because they are provided by the\nConvTransposeUnpoolOpBase operator. Various dimension checks are done\nimplicitly, and the sizes are specified in the Input docs for this operator.\nAs is expected, the filter is deconvolved with a subset of the\nimage and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nconv_transpose_op_impl.h is the templated implementation of the\nconv_transpose_op.h file, which is why they are separate files.\n ",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data blob from previous layer; has size (N x H x W x C), where N is the batch size, C is the number of channels, and H and W are the height and width. Note that NHWC is supported now",
+ "name": "X"
+ },
+ {
+ "description": "The filter blob that will be used in the transposed convolution; has size (M x kH x kW x C), where C is the number of channels, and kH and kW are the height and width of the kernel.",
+ "name": "filter"
+ },
+ {
+ "description": "The 1D bias blob that is added through the convolution;has size (C). Optional, if not passed, will treat it as all 0.",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob that contains the result of the transposed convolution. The output dimensions are functions of the kernel size, stride size, and pad lengths.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
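For the transposed convolution the same caveat applies in reverse; a sketch of the commonly used output-size formula (assumed convention, with no output padding):

```python
# Spatial output size of a transposed convolution, the inverse of the
# conv arithmetic above (assumed convention; output_padding omitted).
def conv_transpose_out_dim(in_dim: int, kernel: int, stride: int = 1, pad: int = 0) -> int:
    return (in_dim - 1) * stride - 2 * pad + kernel

# Upsampling 112 -> 224 with a 4x4 kernel, stride 2, pad 1.
assert conv_transpose_out_dim(112, kernel=4, stride=2, pad=1) == 224
```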
+ {
+ "name": "Int8Dequantize",
+ "inputs": [
+ {
+ "description": "Int8 Tensor qX.",
+ "name": "qX"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "FP32 Tensor that represents mapped real value of qX.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
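The Int8 family of ops in this file shares an affine quantization scheme parameterized by `Y_scale` and `Y_zero_point`. Assuming that standard mapping, dequantization is the inverse affine transform; a numpy sketch, not the operator's actual kernel:

```python
import numpy as np

# Affine int8 -> fp32 mapping, assuming the standard scheme
# Y = scale * (qX - zero_point); widen to int32 before subtracting.
def dequantize(qX: np.ndarray, scale: float, zero_point: int) -> np.ndarray:
    return scale * (qX.astype(np.int32) - zero_point)

qX = np.array([0, 128, 255], dtype=np.uint8)
print(dequantize(qX, scale=0.1, zero_point=128))  # [-12.8   0.   12.7]
```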
+ {
+ "name": "Int8FC",
+ "category": "Layer",
+ "description": "\nComputes the result of passing an input vector X into a fully\nconnected layer with 2D weight matrix W and 1D bias vector b. That is,\nthe layer computes Y = X * W^T + b, where X has size (M x K),\nW has size (N x K), b has size (N), and Y has size (M x N),\nwhere M is often the batch size.\n\n\nNOTE: X does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\nX \\in [a_0, a_1 * ... * a_{n-1}]. Only this case is supported!\nLastly, even though b is a 1D vector of size N, it is copied/resized to\nbe size (M x N) implicitly and added to each vector in the batch.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "order"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "input tensor that's coerced into a 2D matrix of size (MxK) as described above",
+ "name": "X"
+ },
+ {
+ "description": "A tensor that is coerced into a 2D blob of size (KxN) containing fully connected weight matrix",
+ "name": "W"
+ },
+ {
+ "description": "1D blob containing bias vector",
+ "name": "b"
+ },
+ {
+ "description": "Optional scale quantization param computed on activation histogram dataWill overwrite Y_scale argument if specified",
+ "name": "Scale qparam"
+ },
+ {
+ "description": "Optionsl zero-point quantization param computed on activation dataWill overwrite Y_zero_point argument if specified",
+ "name": "Zero-point qparam"
+ },
+ {
+ "description": "Optional Qparam blob that contains quant param computed on activation histogram dataWill overwrite Y_scale and Y_zero_point argument if specified",
+ "name": "Qparam"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "2D output tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
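A float sketch of the contract described above: X is coerced to (M x K), W is (N x K), b is (N), and Y = X * W^T + b is (M x N). Quantization and the optional qparam inputs are omitted:

```python
import numpy as np

def fc(X: np.ndarray, W: np.ndarray, b: np.ndarray) -> np.ndarray:
    M = X.shape[0]             # a_0 stays as the batch dimension
    X2 = X.reshape(M, -1)      # coerce to [a_0, a_1 * ... * a_{n-1}]
    return X2 @ W.T + b        # b broadcasts over the M rows

X = np.random.randn(4, 2, 3).astype(np.float32)  # coerced to (4, 6)
W = np.random.randn(5, 6).astype(np.float32)     # (N x K) = (5, 6)
b = np.zeros(5, dtype=np.float32)
assert fc(X, W, b).shape == (4, 5)
```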
+ {
+ "name": "Int8FCPackWeight",
+ "description": "Prepack weight for Int8FC",
+ "attributes": [
+ {
+ "description": "See FC operator",
+ "name": "axis_w",
+ "option": "optional"
+ },
+ {
+ "description": "Default false. Per output channel quantization",
+ "name": "quantize_channelwise",
+ "option": "optional"
+ },
+ {
+ "description": "Default false. Store unpacked quantized weights to W_q.original_tensor",
+ "name": "save_unpacked_weights",
+ "option": "optional"
+ },
+ {
+ "description": "The scale of input activation tensor. Only meaningful when bias is provided (NOTE: this is not the scale of weight",
+ "name": "in_scale",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Weight tensor in KRSC layout",
+ "name": "W"
+ },
+ {
+ "description": "Bias tensor",
+ "name": "b"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Weight/bias tensor in a packed format with type Int8FCDNNLowPPackedWeightBlob",
+ "name": "W_q"
+ },
+ {
+ "description": "Bias int32 quantized tensor",
+ "name": "B_q"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8Flatten",
+ "description": "\nFlattens the input tensor into a 2D matrix. If input tensor has shape\n(d_0, d_1, ... d_n) then the output will have shape\n(d_0 X d_1 ... d_(axis-1), d_axis X d_(axis+1) ... X dn)\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ },
+ {
+ "description": "(Default to 1) Indicate up to which input dimensions (exclusive) should be flattened to the outer dimension of the output",
+ "name": "axis",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "A Int8 tensor of rank >= axis.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "A 2D Int8 tensor with the contents of the input tensor, with input dimensions up to axis flattened to the outer dimension of the output and remaining input dimensions flattened into the inner dimension of the output.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
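The shape rule in the description is easy to state in one line; a sketch of the same computation on a plain numpy array:

```python
import numpy as np

# Dims before `axis` collapse into the outer dimension, the rest into
# the inner one: (d_0*...*d_(axis-1), d_axis*...*d_n).
def flatten(x: np.ndarray, axis: int = 1) -> np.ndarray:
    outer = int(np.prod(x.shape[:axis]))
    return x.reshape(outer, -1)

x = np.zeros((2, 3, 4, 5), dtype=np.int8)
assert flatten(x, axis=2).shape == (6, 20)
assert flatten(x, axis=1).shape == (2, 60)
```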
+ {
+ "name": "Int8GenQuantParams",
+ "description": "Operator wrapper for generating int8 tensor quantization parameters given the input data and quant scheme",
+ "inputs": [
+ {
+ "description": "The input data, or last N samples of the output activations.",
+ "name": "X"
+ },
+ {
+ "description": "Int8QuantSchemeBlob that specifies the quantization kind and preserve_sparsity options when generating the quant params.",
+ "name": "quant_scheme"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Int8QuantParamsBlob that contains the scale and zero_point info in TensorQuantizationParams type.",
+ "name": "quant_param"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8GivenIntTensorFill",
+ "description": "\n Creates quantized tensor of type int32 with scale and zero point info.\n",
+ "attributes": [
+ {
+ "description": "Input array of type int32",
+ "name": "values",
+ "option": "optional"
+ },
+ {
+ "description": "Input tensor shape",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "An Int8TensorCPU with scale and zero point info",
+ "name": "Tensor"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8GivenTensorFill",
+ "description": "\n Creates quantized tensor of type char(byte) with scale and zero point info.\n",
+ "attributes": [
+ {
+ "description": "Input array of type char(byte)",
+ "name": "values",
+ "option": "optional"
+ },
+ {
+ "description": "Input tensor shape",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "An Int8TensorCPU with scale and zero point info",
+ "name": "Tensor"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8LeakyRelu",
+ "description": "\nLeakyRelu takes input data (Tensor) and an argument alpha, and produces one\noutput data (Tensor) where the function `f(x) = alpha * x for x < 0`,\n`f(x) = x for x >= 0`, is applied to the data tensor elementwise.\n",
+ "attributes": [
+ {
+ "description": "Coefficient of leakage, default value is 0.01",
+ "name": "alpha",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1D input tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D input tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8MaxPool",
+ "description": "MaxPool \nconsumes an input blob X and applies max pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Max pooling consisting of taking the maximum value of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data tensor from max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Output will go through rectified linear",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8MaxPoolRelu",
+ "description": "MaxPool \nconsumes an input blob X and applies max pooling across the\nthe blob according to kernel sizes, stride sizes, and pad lengths defined by the\nConvPoolOpBase operator. Max pooling consisting of taking the maximum value of a\nsubset of the input tensor according to the kernel size and downsampling the\ndata into the output blob Y for further processing.\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input data tensor from the previous operator; dimensions depend on whether the NCHW or NHWC operators are being used. For example, in the former, the input has size (N x C x H x W), where N is the batch size, C is the number of channels, and H and W are the height and the width of the data. The corresponding permutation of dimensions is used in the latter case.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data tensor from max pooling across the input tensor. Dimensions will vary based on various kernel, stride, and pad sizes. Output will go through rectified linearfunction, where y = max(0, x).",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8Quantize",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "order"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "FP32 Tensor X.",
+ "name": "X"
+ },
+ {
+ "description": "Optional scale quantization param computed on activation histogram dataWill overwrite Y_scale argument if specified",
+ "name": "Scale qparam"
+ },
+ {
+ "description": "Optionsl zero-point quantization param computed on activation dataWill overwrite Y_zero_point argument if specified",
+ "name": "Zero-point qparam"
+ },
+ {
+ "description": "Optional Qparam blob that contains quant param computed on activation histogram dataWill overwrite Y_scale and Y_zero_point argument if specified",
+ "name": "Qparam"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Int8 Tensor qX representing X with linear quantization.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
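Quantization is the inverse of Int8Dequantize above; a sketch assuming the same affine scheme with round-and-clamp to the uint8 range (edge-case rounding of the real kernel may differ):

```python
import numpy as np

def quantize(X: np.ndarray, scale: float, zero_point: int) -> np.ndarray:
    q = np.round(X / scale) + zero_point
    return np.clip(q, 0, 255).astype(np.uint8)

X = np.array([-12.8, 0.0, 12.7], dtype=np.float32)
print(quantize(X, scale=0.1, zero_point=128))  # [  0 128 255]
```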
+ {
+ "name": "Int8Relu",
+ "category": "Activation",
+ "description": "\nRelu takes one input data (Tensor) and produces one output data\n(Tensor) where the rectified linear function, y = max(0, x), is applied to\nthe tensor elementwise.\n",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "order"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1D input tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D input tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8Reshape",
+ "description": "\nReshape the input tensor similar to numpy.reshape.\n\nIt takes a tensor as input and an optional tensor specifying the new shape.\nWhen the second input is absent, an extra argument `shape` must be specified.\nIt outputs the reshaped tensor as well as the original shape.\n\nAt most one dimension of the new shape can be -1. In this case, the value is\ninferred from the size of the tensor and the remaining dimensions. A dimension\ncould also be 0, in which case the actual dimension value is going to be copied\nfrom the input tensor.\n",
+ "attributes": [
+ {
+ "description": "New shape",
+ "name": "shape",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "An input tensor.",
+ "name": "data"
+ },
+ {
+ "description": "New shape.",
+ "name": "new_shape"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Reshaped data.",
+ "name": "reshaped"
+ },
+ {
+ "description": "Original shape.",
+ "name": "old_shape"
+ }
+ ],
+ "support_level": "default"
+ },
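The -1 and 0 conventions from the description, worked through in a small helper (illustrative only):

```python
import numpy as np

# -1 is inferred from the element count; 0 copies the input dimension
# at the same position.
def resolve_shape(in_shape, new_shape):
    out = [in_shape[i] if d == 0 else d for i, d in enumerate(new_shape)]
    if -1 in out:
        known = int(np.prod([d for d in out if d != -1]))
        out[out.index(-1)] = int(np.prod(in_shape)) // known
    return out

assert resolve_shape((2, 3, 4), (0, -1)) == [2, 12]
assert resolve_shape((2, 3, 4), (4, 6)) == [4, 6]
```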
+ {
+ "name": "Int8ResizeNearest",
+ "description": "\nResizes the spatial dimensions of the input using nearest neighbor\ninterpolation. The `width_scale` and `height_scale` arguments\ncontrol the size of the output, which is given by:\noutput_width = floor(input_width * width_scale)\noutput_height = floor(output_height * height_scale)\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ },
+ {
+ "description": "Scale along width dimension",
+ "name": "width_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Scale along height dimension",
+ "name": "height_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output dimensions (HxW). If specified this takes precedence over scale values.",
+ "name": "output_size",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input Int8 tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output Int8 tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
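The (corrected) size formulas from the description, stated directly:

```python
import math

# output = floor(input * scale), per spatial dimension; `output_size`,
# when given, overrides both scales.
def resized_hw(h: int, w: int, height_scale: float, width_scale: float):
    return math.floor(h * height_scale), math.floor(w * width_scale)

assert resized_hw(10, 17, height_scale=2.0, width_scale=0.5) == (20, 8)
```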
+ {
+ "name": "Int8RoIAlign",
+ "description": "\nRegion of Interest (RoI) align operation as used in Mask R-CNN.\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ },
+ {
+ "description": "(float) default 1.0; Spatial scale of the input feature map X relative to the input image. E.g., 0.0625 if X has a stride of 16 w.r.t. the input image.",
+ "name": "spatial_scale",
+ "option": "optional"
+ },
+ {
+ "description": "(int) default 1; Pooled output Y's height.",
+ "name": "pooled_h",
+ "option": "optional"
+ },
+ {
+ "description": "(int) default 1; Pooled output Y's width.",
+ "name": "pooled_w",
+ "option": "optional"
+ },
+ {
+ "description": "(int) default -1; number of sampling points in the interpolation grid used to compute the output value of each pooled output bin. If > 0, then exactly sampling_ratio x sampling_ratio grid points are used. If <= 0, then an adaptive number of grid points are used (computed as ceil(roi_width / pooled_w), and likewise for height).",
+ "name": "sampling_ratio",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "4D Int8 Tensor feature map input of shape (N, C, H, W).",
+ "name": "X"
+ },
+ {
+ "description": "2D input of shape (R, 4 or 5) specifying R RoIs representing: batch index in [0, N - 1], x1, y1, x2, y2. The RoI coordinates are in the coordinate system of the input image. For inputs corresponding to a single image, batch index can be excluded to have just 4 columns.",
+ "name": "RoIs"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "4D Int8 Tensor output of shape (R, C, pooled_h, pooled_w). The r-th batch element is a pooled feature map cooresponding to the r-th RoI.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8Sigmoid",
+ "description": "\nApply the Sigmoid function element-wise to the input tensor. This is often used\nas a non-linear activation function in a neural network. The sigmoid function is\ndefined as:\n\n$$Sigmoid(x) = \\frac{1}{1+\\exp(-x)}$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/sigmoid_op.cc\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The sigmoid normalized output values with the same shape as input tensor.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
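The element-wise function from the description, in float for clarity (the int8 op additionally requantizes the result with Y_scale / Y_zero_point):

```python
import numpy as np

def sigmoid(x: np.ndarray) -> np.ndarray:
    return 1.0 / (1.0 + np.exp(-x))

print(sigmoid(np.array([-2.0, 0.0, 2.0])))  # ~[0.119 0.5 0.881]
```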
+ {
+ "name": "Int8Slice",
+ "description": "\nProduces a slice of the input Int8 tensor. Currently, only slicing in a single\ndimension is supported.\nSlices are passed as 2 1D vectors or as two keyword argument lists with starting\nand end indices for each dimension of the input `data` tensor. If a negative\nvalue is passed for any of the start or end indices, it represents the number of\nelements before the end of that dimension. End indices are non-inclusive unless\nnegative (end index -1 means up to and including the last element).\n\n\nExample:\n\n data = [\n [1, 2, 3, 4],\n [5, 6, 7, 8],\n ]\n starts = [0, 1]\n ends = [-1, 3]\n\n result = [\n [2, 3],\n [6, 7],\n ]\n",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ },
+ {
+ "description": "List of starting indices",
+ "name": "starts",
+ "option": "optional"
+ },
+ {
+ "description": "List of ending indices",
+ "name": "ends",
+ "option": "optional"
+ },
+ {
+ "description": "(Optional) The dimension to slice over. If specified start_idx and end_idx should also be given and it takes precedence over starts and ends",
+ "name": "dim",
+ "option": "optional"
+ },
+ {
+ "description": "(Optional) The dimension to start slice from. Default is 0",
+ "name": "start_idx",
+ "option": "optional"
+ },
+ {
+ "description": "(Optional) The dimension to end the slice. Default is -1",
+ "name": "end_idx",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Int8 Tensor of data to extract slices from.",
+ "name": "data"
+ },
+ {
+ "description": "1D tensor: start-indices for each dimension of data.",
+ "name": "starts"
+ },
+ {
+ "description": "1D tensor: end-indices for each dimension of data.",
+ "name": "ends"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Sliced Int8 data tensor.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
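The worked example from the description, reproduced with plain numpy indexing; this sketch handles only the end value -1 ("through the last element") that the example uses:

```python
import numpy as np

def slice_nd(data, starts, ends):
    sl = []
    for start, end, dim in zip(starts, ends, data.shape):
        sl.append(slice(start, dim if end == -1 else end))
    return data[tuple(sl)]

data = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
print(slice_nd(data, starts=[0, 1], ends=[-1, 3]))  # [[2 3] [6 7]]
```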
+ {
+ "name": "Int8Softmax",
+ "category": "Activation",
+ "description": "\nThe operator computes the softmax normalized values for each layer in the batch\n of the given input. The input is a 2-D tensor (Tensor) of size\n(batch_size x input_feature_dimensions). The output tensor has the same shape\nand contains the softmax normalized values of the corresponding input.\n\nX does not need to explicitly be a 2D vector; rather, it will be\ncoerced into one. For an arbitrary n-dimensional tensor\nX \\in [a_0, a_1, ..., a_{k-1}, a_k, ..., a_{n-1}] and k is\nthe axis provided, then X will be coerced into a 2-dimensional tensor with\ndimensions [a_0 * ... * a_{k-1}, a_k * ... * a_{n-1}]. For the default\ncase where axis=1, this means the X tensor will be coerced into a 2D tensor\nof dimensions [a_0, a_1 * ... * a_{n-1}], where a_0 is often the batch size.\nIn this situation, we must have a_0 = N and a_1 * ... * a_{n-1} = D.\nEach of these dimensions must be matched correctly, or else the operator\nwill throw errors.\n",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "order"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ },
+ {
+ "description": "(int) default to 1; describes the axis of the inputs when coerced to 2D; defaults to one because the 0th axis most likely describes the batch_size",
+ "name": "axis",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The input tensor that's coerced into a 2D matrix of size (NxD) as described above.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The softmax normalized output values with the same shape as input tensor.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
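The 2D coercion plus row-wise softmax described above, as a float sketch:

```python
import numpy as np

def softmax(X: np.ndarray, axis: int = 1) -> np.ndarray:
    N = int(np.prod(X.shape[:axis]))     # rows: a_0 * ... * a_{k-1}
    X2 = X.reshape(N, -1)                # cols: a_k * ... * a_{n-1}
    e = np.exp(X2 - X2.max(axis=1, keepdims=True))   # stabilized
    return (e / e.sum(axis=1, keepdims=True)).reshape(X.shape)

X = np.random.randn(2, 3, 4).astype(np.float32)
rows = softmax(X, axis=1).reshape(2, -1)
assert np.allclose(rows.sum(axis=1), 1.0, atol=1e-6)
```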
+ {
+ "name": "Int8Sum",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "order"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8SumRelu",
+ "attributes": [
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Int8Transpose",
+ "description": "\nTranspose the input tensor by permuting the axes of the input according\nto the `axes` argument. Similar to numpy's\n[transpose](https://docs.scipy.org/doc/numpy/reference/generated/numpy.transpose.html)\nfunction.\n\nFor example, when axes=(1, 0, 2), given an input tensor of shape\n(1, 2, 3), the output shape will be (2, 1, 3).\n",
+ "attributes": [
+ {
+ "description": "Order to permute axes of input tensor. Reverses the dimensions by default.",
+ "name": "axes",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization scale",
+ "name": "Y_scale",
+ "option": "optional"
+ },
+ {
+ "description": "Output tensor quantization offset",
+ "name": "Y_zero_point",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Transposed output",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
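Since the op mirrors numpy.transpose, the axes semantics can be checked directly against numpy:

```python
import numpy as np

x = np.zeros((1, 2, 3), dtype=np.int8)
assert np.transpose(x, axes=(1, 0, 2)).shape == (2, 1, 3)  # doc example
assert np.transpose(x).shape == (3, 2, 1)  # default: reversed dimensions
```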
+ {
+ "name": "IntegralImage",
+ "description": "\nComputes an integral image, which contains the sum of pixel values within\nan image vertically and horizontally. This integral image can then be used\nwith other detection and tracking techniques.\n",
+ "inputs": [
+ {
+ "description": "Images tensor of the form (N, C, H, W)",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Integrated image of the form (N, C, H+1, W+1)",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
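A sketch of the integral image via two cumulative sums; the leading zero row and column are an assumption made here to match the documented (N, C, H+1, W+1) output shape:

```python
import numpy as np

def integral_image(X: np.ndarray) -> np.ndarray:
    N, C, H, W = X.shape
    Y = np.zeros((N, C, H + 1, W + 1), dtype=np.float32)
    Y[:, :, 1:, 1:] = X.cumsum(axis=2).cumsum(axis=3)
    return Y

X = np.ones((1, 1, 2, 2), dtype=np.float32)
print(integral_image(X)[0, 0])   # bottom-right entry is the total sum, 4
```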
+ {
+ "name": "IntegralImageGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "IntIndexCreate",
+ "description": "\nCreates a dictionary that maps int32 keys to consecutive integers\nfrom 1 to max_elements. Zero is reserved for unknown keys.\n",
+ "attributes": [
+ {
+ "description": "Max number of elements, including the zero entry.",
+ "name": "max_elements",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Pointer to an Index instance.",
+ "name": "handler"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IsEmpty",
+ "description": "\nThe *IsEmpty* op accepts a single input $tensor$, and produces a single boolean output $is\\_empty$. The output is *True* if and only if $tensor$ has size == 0.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"IsEmpty\",\n [\"tensor\"],\n [\"is_empty\"],\n)\n\n// Use a not-empty tensor\nworkspace.FeedBlob(\"tensor\", np.random.randn(2, 2).astype(np.float32))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"is_empty: \", workspace.FetchBlob(\"is_empty\"),\"\\n\")\n\n// Use an empty tensor\nworkspace.FeedBlob(\"tensor\", np.empty(0))\nprint(\"tensor:\\n\", workspace.FetchBlob(\"tensor\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"is_empty: \", workspace.FetchBlob(\"is_empty\"))\n\n```\n\n**Result**\n\n```\n\ntensor:\n [[ 0.26018378 0.6778789 ]\n [-1.3097627 -0.40083608]]\nis_empty: False\n\ntensor:\n []\nis_empty: True\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "Input data tensor to check if empty.",
+ "name": "tensor"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output scalar boolean tensor. True if input has size == 0.",
+ "name": "is_empty"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IsMemberOf",
+ "description": "\nThe *IsMemberOf* op takes an input tensor *X* and a list of values as argument, and produces one output data tensor *Y*. The output tensor is the same shape as *X* and contains booleans. The output is calculated as the function *f(x) = x in value* and is applied to *X* elementwise.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/elementwise_logical_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/elementwise_logical_ops.h\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"IsMemberOf\",\n [\"X\"],\n [\"Y\"],\n value=[0,2,4,6,8],\n)\n\n// Use a not-empty tensor\nworkspace.FeedBlob(\"X\", np.array([0,1,2,3,4,5,6,7,8]).astype(np.int32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y: \\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n// value=[0,2,4,6,8]\n\nX:\n [0 1 2 3 4 5 6 7 8]\nY:\n [ True False True False True False True False True]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "List of values to check for membership.",
+ "name": "value",
+ "option": "optional"
+ },
+ {
+ "description": "The data type for the elements of the output tensor. Strictly must be one of the types from DataType enum in TensorProto.",
+ "name": "dtype",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor of any shape",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output tensor (same size as X containing booleans)",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "IsNaN",
+ "description": "Returns a new tensor with boolean elements representing if each element is NaN or not.",
+ "inputs": [
+ {
+ "description": "Tensor to check for nan",
+ "name": "tensor"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor containing a 1 at each location of NaN elements.",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Iter",
+ "description": "\nStores a singe integer, that gets incremented on each call to Run().\nUseful for tracking the iteration count during SGD, for example.\n",
+ "support_level": "default"
+ },
+ {
+ "name": "KeySplit",
+ "support_level": "default"
+ },
+ {
+ "name": "KeyValueToMap",
+ "description": "Convert key and value blob pairs into a map blob",
+ "inputs": [
+ {
+ "description": "Blob reference to the key",
+ "name": "key blob"
+ },
+ {
+ "description": "Blob reference to the value",
+ "name": "value blob"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Blob reference to the map",
+ "name": "map blob"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "L1Distance",
+ "description": "\nComputes the row-wise L1 Distance between the two input tensors $X$ and $Y$, which is defined as\n\n$$L1Distance(\\mathbf{x},\\mathbf{y}) = \\sum_{i}\\mid x_i - y_i\\mid$$\n\nNote, both inputs must either be 1-dimensional or 2-dimensional and both must have the same shape. The output $Z$ will be 1-dimensional regardless and its length will equal the number of rows in the inputs.\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/distance_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"L1Distance\",\n [\"X\", \"Y\"],\n [\"Z\"]\n)\n\n// Create X\nX = 5*np.ones((1, 4))\nprint(\"X:\\n\",X)\n\n// Create Y\nY = np.ones((1, 4))\nprint(\"Y:\\n\",Y)\n\n// Feed X & Y into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"Y\", Y.astype(np.float32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Z:\\n\", workspace.FetchBlob(\"Z\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[5. 5. 5. 5.]]\nY:\n [[1. 1. 1. 1.]]\nZ:\n [16.]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "First input tensor. (1D or 2D)",
+ "name": "X"
+ },
+ {
+ "description": "Second input tensor. (must have the same shape as $X$)",
+ "name": "Y"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D output tensor. One value for each row of the inputs.",
+ "name": "Z"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "L1DistanceGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LabelCrossEntropy",
+ "description": "\nThis operator computes the cross entropy between a $NxD$ dimensional input data tensor $X$ and a one dimensional input label tensor $label$. The op produces a single length $N$ output tensor $Y$. Here, $N$ is considered the batch size and $D$ is the size of each element in the batch. In practice, it is most commonly used at the end of models as a part of the loss computation, after the SoftMax operator and before the AveragedLoss operator. The cross entropy operation is defined as follows\n\n$$Y_i = -log(X_{ij})$$\n\nwhere ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability.\n\nThe difference between *LabelCrossEntropy* and *CrossEntropy* is how the labels are specified. Here, the labels are a length $N$ list of integers, whereas in CrossEntropy the labels are a $NxD$ dimensional matrix of one hot label vectors. However, the results of computation should be the same, as shown in the two examples where ($i$, $j$) is the classifier's prediction of the $j$th class (the correct one), and $i$ is the batch size. Each log has a lower limit for numerical stability.\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/cross_entropy_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LabelCrossEntropy\",\n [\"X\", \"label\"],\n [\"Y\"]\n)\n\n// Create X: Sample softmax output for 5-class model\nX = np.array([[.01, .05, .02, .02, .9],[.03, .1, .42, .05, .4]])\nprint(\"X:\\n\",X)\n\n// Create label: Sample 1-hot ground truth label vectors\nlabel = np.array([4,2])\nprint(\"label:\\n\",label)\n\n// Feed X & label into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\nworkspace.FeedBlob(\"label\", label.astype(np.int32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[0.01 0.05 0.02 0.02 0.9 ]\n [0.03 0.1 0.42 0.05 0.4 ]]\nlabel:\n [4 2]\nY:\n [0.10536055 0.8675006 ]\n\n```\n\n \n\n\n",
+ "inputs": [
+ {
+ "description": "Input tensor which is almost always the result of a softmax operation. $X$ is a 2D array of size $NxD$, where $N$ is the batch size and $D$ is the number of classes.",
+ "name": "X"
+ },
+ {
+ "description": "Blob containing the labels used to compare the input. $label$ is a length $N$ list of integers, where each element is the integer label for the $n$th element of the batch.",
+ "name": "label"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output blob from the cross entropy computation. $Y$ is 1D length $N$ tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LabelCrossEntropyGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LambdaRankNdcg",
+ "description": "\nIt implements the LambdaRank as appeared in Wu, Qiang, et al. \"Adapting boosting\nfor information retrieval measures.\" Information Retrieval 13.3 (2010): 254-270.\n\nThis method heuristically optimizes the NDCG.\n",
+ "support_level": "default"
+ },
+ {
+ "name": "LambdaRankNdcgGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Lars",
+ "description": "\nImplement Layer-wise Adaptive Rate Scaling (LARS) with clipping. Before adding weight\ndecay, given a parameter tensor X and its gradient dX, the local learning rate\nfor X will be\n\nlocal_lr = trust * norm(X) / ( norm(dX) + wd * norm(X) + offset * norm(X) )\n\n = trust / ( norm(dX) / norm(X) + wd + offset ),\n\nwhere offset is a preset hyper-parameter to avoid numerical issue and trust\nindicates how much we trust the layer to change its parameters during one update.\nIn this implementation, we uses l2 norm and the computed local learning rate is\nclipped based on the upper bound lr_max and the lower bound lr_min:\n\nlocal_lr = min(local_lr, lr_max) and local_lr = max(local_lr, lr_min)\n\n",
+ "attributes": [
+ {
+ "description": "rescaling offset parameter",
+ "name": "offset",
+ "option": "optional"
+ },
+ {
+ "description": "minimum learning rate for clipping",
+ "name": "lr_min",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Parameter tensor",
+ "name": "X"
+ },
+ {
+ "description": "Gradient tensor",
+ "name": "dX"
+ },
+ {
+ "description": "Weight decay",
+ "name": "wd"
+ },
+ {
+ "description": "Trust",
+ "name": "trust"
+ },
+ {
+ "description": "Upper bound of learning rate",
+ "name": "lr_max"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Rescaled local learning rate",
+ "name": "lr_rescaled"
+ }
+ ],
+ "support_level": "default"
+ },
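The clipped local learning rate from the description, transcribed directly (l2 norms, then clamping to [lr_min, lr_max]):

```python
import numpy as np

def lars_local_lr(X, dX, wd, trust, offset, lr_min, lr_max):
    norm_x, norm_dx = np.linalg.norm(X), np.linalg.norm(dX)
    local_lr = trust / (norm_dx / norm_x + wd + offset)
    return float(np.clip(local_lr, lr_min, lr_max))

X, dX = np.ones(10), 0.1 * np.ones(10)
print(lars_local_lr(X, dX, wd=1e-4, trust=0.001, offset=1e-6,
                    lr_min=0.0, lr_max=0.02))   # ~0.00999
```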
+ {
+ "name": "LastNWindowCollector",
+ "description": "\nCollect the last N rows from input data. The purpose is to keep track of data\naccross batches, so for example suppose the LastNWindowCollector is called\nsuccessively with the following input data\n\n [1, 2, 3, 4]\n [5, 6, 7]\n [8, 9, 10, 11]\n\nAnd the number of items is set to 6, then the output after the 3rd call\nwill contain the following elements:\n\n [6, 7, 8, 9, 10, 11]\n\nNo guarantee is made on the ordering of elements in input. So a valid value for\noutput could have been\n\n [11, 10, 9, 8, 7, 6]\n\nAlso, this method works for any order tensor, treating the first dimension as\ninput rows and keeping the last N rows seen as input. So for instance:\n\n [[1, 2], [2, 3], [3, 4], [4, 5]]\n [[5, 6], [6, 7], [7, 8]]\n [[8, 9], [9, 10], [10, 11], [11, 12]]\n\nA possible output would be\n\n [[6, 7], [7, 8], [8, 9], [9, 10], [10, 11], [11, 12]]\n\nThis is not thread safe unless a mutex is given.\n",
+ "attributes": [
+ {
+ "description": "The number of random samples to append for each positive samples",
+ "name": "num_to_collect",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The buffer for last-N record. Should be initialized to empty tensor",
+ "name": "last-N buffer"
+ },
+ {
+ "description": "The cursor pointing to the next position that should be replaced. Should be initialized to 0.",
+ "name": "next cursor"
+ },
+ {
+ "description": "tensor to collect from",
+ "name": "DATA"
+ },
+ {
+ "description": "(optional) mutex to use to make this thread-safe",
+ "name": "MUTEX"
+ },
+ {
+ "description": "",
+ "name": "NUM_VISITED"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Data stored in sessions",
+ "name": "last-N buffer"
+ },
+ {
+ "description": "Updated input cursor",
+ "name": "next cursor"
+ },
+ {
+ "description": "number of records seen so far",
+ "name": "NUM_VISITED"
+ }
+ ],
+ "support_level": "default"
+ },
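A sketch of the cyclic-overwrite behavior the description implies (which also explains why output ordering is not guaranteed); mutex handling is omitted:

```python
def collect(buffer, cursor, rows, num_to_collect):
    # Fill until full, then overwrite the oldest slot cyclically.
    for row in rows:
        if len(buffer) < num_to_collect:
            buffer.append(row)
        else:
            buffer[cursor % num_to_collect] = row
        cursor += 1
    return buffer, cursor

buf, cur = [], 0
for batch in ([1, 2, 3, 4], [5, 6, 7], [8, 9, 10, 11]):
    buf, cur = collect(buf, cur, batch, num_to_collect=6)
print(sorted(buf))   # [6, 7, 8, 9, 10, 11], as in the example above
```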
+ {
+ "name": "LayerNorm",
+ "description": "\nComputes layer normalization as described in https://arxiv.org/pdf/1607.06450.pdf.\nGiven an input vector x \\in [a_0, a_1, ...,a_{k-1}, a_k, ..., a_{n-1}],\nthis op treats dimensions a_k through a_{n-1} as feature vectors. For each\nfeature vector, the op contains the mean and standard deviation. Then,\nit returns the normalized values (with respect to the feature vector).\n\nNote that this op does not contain the scale an bias terms described in the\npaper. Simply follow this op with an FC op to add those. Concretely, this op\nimplements:\n\nh = \\frac{1}{\\sigma}(a - \\mu)\nwhere \\mu = \\frac{1}{H}\\sum_{i=1}^{H} a_i\nand \\sigma = \\sqrt{\\frac{1}{H}\\sum_{i=1}^{H}(a_i - \\mu)^2}\nwhere H is the number of hidden units (i.e. product of dimensions from 'axis'\nto the end.)\n",
+ "attributes": [
+ {
+ "description": "(int) default to 1; Describes axis of the inputs. Defaults to one because the 0th axis most likely describes the batch size",
+ "name": "axis",
+ "option": "optional"
+ },
+ {
+ "description": "(float) default to 0.001. Small value to be added to the stdev when dividing out by that value. This prevents division by zero.",
+ "name": "epsilon",
+ "option": "optional"
+ },
+ {
+ "description": "(bool) default to False; If true, this op will do affine transformation after normalization.",
+ "name": "elementwise_affine",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor which layer normalization will be applied to",
+ "name": "input"
+ },
+ {
+ "description": "scale tensor for elementwise_affine, the shape should be the same as the dimensions of X begin from axis",
+ "name": "gamma"
+ },
+ {
+ "description": "bias tensor for elementwise_affine, the shape should be the same as the dimensions of X begin from axis",
+ "name": "beta"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Normalized values",
+ "name": "output"
+ },
+ {
+ "description": "Mean values for each feature vector",
+ "name": "mean"
+ },
+ {
+ "description": "Standard deviations for each feature vector",
+ "name": "stddev"
+ }
+ ],
+ "support_level": "default"
+ },
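The formulas in the description, as a numpy sketch with the affine (gamma/beta) path omitted, as the op allows when elementwise_affine is false:

```python
import numpy as np

def layer_norm(x: np.ndarray, axis: int = 1, epsilon: float = 1e-3):
    n = int(np.prod(x.shape[:axis]))
    flat = x.reshape(n, -1)                 # one feature vector per row
    mu = flat.mean(axis=1, keepdims=True)
    sigma = np.sqrt(((flat - mu) ** 2).mean(axis=1, keepdims=True))
    h = (flat - mu) / (sigma + epsilon)     # epsilon added to the stdev
    return h.reshape(x.shape), mu.ravel(), sigma.ravel()

y, mu, sigma = layer_norm(np.random.randn(4, 8).astype(np.float32))
assert np.allclose(y.mean(axis=1), 0.0, atol=1e-5)
```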
+ {
+ "name": "LayerNormGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LC",
+ "description": "\nThe locally connected operator consumes an input vector, a filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n",
+ "inputs": [
+ {
+ "name": null
+ },
+ {
+ "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.",
+ "name": "filter"
+ },
+ {
+ "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob that contains the result of the locally connected op.The output dimensions are functions of the kernel size, stride size, and pad lengths.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LC1D",
+ "description": "\nThe locally connected operator consumes an input vector, a 1D filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n",
+ "inputs": [
+ {
+ "name": null
+ },
+ {
+ "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.",
+ "name": "filter"
+ },
+ {
+ "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob that contains the result of the locally connected op.The output dimensions are functions of the kernel size, stride size, and pad lengths.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LC1DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LC2D",
+ "description": "\nThe locally connected operator consumes an input vector, a 2D filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n",
+ "inputs": [
+ {
+ "name": null
+ },
+ {
+ "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.",
+ "name": "filter"
+ },
+ {
+ "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob that contains the result of the locally connected op.The output dimensions are functions of the kernel size, stride size, and pad lengths.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LC2DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LC3D",
+ "description": "\nThe locally connected operator consumes an input vector, a 3D filter blob\nand a bias blob and computes the output. \nNote that other parameters, such as the stride and\nkernel size, or the pads' sizes in each direction are not necessary for input\nbecause they are provided by the ConvPoolOpBase operator. Various dimension\nchecks are done implicitly, and the sizes are specified in the Input docs for\nthis operator. As is expected, the filter is locally connected with a subset of\nthe image and the bias is added; this is done throughout the image data and the\noutput is computed. As a side note on the implementation layout:\nlocally_connected_op_impl.h is the templated implementation of the\nlocally_connected_op.h file, which is why they are separate files.\n",
+ "inputs": [
+ {
+ "name": null
+ },
+ {
+ "description": "The filter blob that will be used in the locally connected op; has size (YH * YW * M x C x kH x kW) if order == NCHW else (YH * YW * M * KH * KW * C), where YH and YW are the height and width of the output image, C is the number of channels, and kH and kW are the height and width of the kernel.",
+ "name": "filter"
+ },
+ {
+ "description": "The 1D bias blob that is added through the locally connected op; has size (YH * YW * M).",
+ "name": "bias"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output data blob that contains the result of the locally connected op.The output dimensions are functions of the kernel size, stride size, and pad lengths.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LC3DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LCGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LE",
+ "description": "\nPerforms element-wise less or equal than comparison **<=** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LE\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [ True False True True True True]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LeakyRelu",
+ "description": "\nThe *LeakyRelu* op takes one input tensor $X$ and an argument $alpha$, and produces one output tensor $Y$ of the same shape as $X.$ The op performs the element wise leaky relu operation, defined as\n\n$$y=LeakyRelu(x) =\\begin{cases}\\alpha x & x < 0\\\\x & otherwise\\end{cases}$$\n\nThe default value of *alpha* is 0.01.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/leaky_relu_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/leaky_relu_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LeakyRelu\",\n [\"X\"],\n [\"Y\"],\n alpha=0.01\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(3, 3).astype(np.float32))\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[-0.91060215 0.09374836 2.1429708 ]\n [-0.748983 0.19164062 -1.5130422 ]\n [-0.29539835 -0.8530696 0.7673204 ]]\n\nY:\n [[-0.00910602 0.09374836 2.1429708 ]\n [-0.00748983 0.19164062 -0.01513042]\n [-0.00295398 -0.0085307 0.7673204 ]]\n\n```\n\n \n\n\n",
+ "attributes": [
+ {
+ "default": 0.01,
+ "description": "Coefficient of leakage.",
+ "name": "alpha",
+ "option": "optional",
+ "type": "float32"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor of data to be operated on.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output tensor, calculated as described above.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LeakyReluGradient",
+ "attributes": [
+ {
+ "description": "Coefficient of leakage",
+ "name": "alpha",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LearningRate",
+ "description": "\nLearning rate is a decreasing function of time. With low learning rates the\nimprovements will be linear. With high learning rates they will start to look\nmore exponential. Learning rate is controlled by the following arguments:\n\n\nRequired:\n `iterations`\n `base_lr`: base learning rate\n `policy`: this controls how the learning rate is applied, options are:\n `fixed`\n `step`: uses `stepsize`, `gamma`\n `exp`: uses `gamma`\n `gate`: uses 'multiplier_1', 'multiplier_2', `num_iter``\n `inv`: uses `gamma`, `power`\n `linearWarmup`: uses `start_multiplier`, `num_iter`\n `constantWarmup`: uses `multiplier`, `num_iter`\n `alter`: uses `active_first`, `active_period`, `inactive_period`\n `hill`: uses those in both `linearWarmup` and `inv`, plus `end_multiplier`\n `composite`: uses `sub_policy_num_iters` and additional args with format\n `cyclic`: uses `max_lr`, `stepsize`\n `cosine`: uses `min_lr`, `max_lr`, `period`, `t_mult`, `lr_shrink`\n `constantThenLinearWarmup`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`\n `compositeCyclical`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`, `cyclical_max_lr`, `cyclical_step_size`, `cyclical_decay`\n `compositeCosine`: uses `start_warmup_multiplier`, `constant_warmup_num_iter`, `linear_warmup_num_iter`, `cosine_max_lr`, `cosine_period`, `cosine_t_mult`, `cosine_lr_shrink`\n sub_policy_{sub_policy_index}_{sub_policy_arg}, for example:\n sub_policy_0_policy: \"exp\", sub_policy_0_gamma: 0.99,\n sub_policy_0_lr_scale: 1.2\n sub_policy_0_policy: \"fixed\", sub_policy_0_lr_scale: 1.0\n sub_policy_num_iters: [1000, 1000]\n\nOptional:\n `stepsize`: defaults to 0\n `max_lr`: defaults to 0.005\n `gamma`: defaults to 0\n `power`: defaults to 0\n `num_iter`: defaults to 0\n `start_multiplier`: defaults to 0\n `multiplier`: defaults to 0.5\n `multiplier_1`: defaults to 1\n `multiplier_2`: defaults to 1\n `m1`: defaults to 0.5, the first piece lr of piece warmup\n `n1`: defaults to 0, iter threshold of the first piece lr\n `m2`: defaults to 0.5, the second piece lr of piece warmup\n `n2`: defaults to 0, iter threshold of the second piece lr\n `m3`: defaults to 0.5, the third piece lr of piece warmup\n `start_warmup_multiplier`: defaults to 0.1, part of constantThenLinearWarmup\n `constant_warmup_num_iter`: defaults to 10000000, part of constantThenLinearWarmup and constantThenLinearWarmup\n `linear_warmup_num_iter`: defaults to 10000000, part of constantThenLinearWarmup, CompositeCyclicalLRPolicy, CompositeCosineLRPolicy\n `cyclical_max_lr`: defaults to 0.05, part of CompositeCyclicalLRPolicy\n `cyclical_step_size`: defaults to 1000000, part of CompositeCyclicalLRPolicy\n `cyclical_decay`: defaults to 1.0, part of CompositeCyclicalLRPolicy\n `cosine_min_lr`:defaults to 0.01, part of CompositeCosineLRPolicy\n `cosine_max_lr`:defaults to 0.05, part of CompositeCosineLRPolicy\n `cosine_period`:defaults to 50, part of CompositeCosineLRPolicy\n `cosine_t_mult`:defaults to 1.0, part of CompositeCosineLRPolicy\n `cosine_lr_shrink`:defaults to 0.99, part of CompositeCosineLRPolicy\n\nUsage:\n train_net.LearningRate(*iterations*, \"*label*\", base_lr=*float*,\n policy=\"policy_name\", stepsize=*int*, gamma=*float*)\n\n\nExample usage:\n train_net.LearningRate(200, \"LR\", base_lr=-0.1,\n policy=\"step\", stepsize=20, gamma=0.9)\n",
+ "attributes": [
+ {
+ "description": "(float, required) base learning rate",
+ "name": "base_lr",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 1.0) strategy for gamma enforcement",
+ "name": "policy",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 1.0) used only for inv policy type",
+ "name": "power",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 1.0) momentum of change",
+ "name": "gamma",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 1.0) sampling rate on iterations",
+ "name": "stepsize",
+ "option": "optional"
+ },
+ {
+ "description": "(boolean, default True) in alter policy",
+ "name": "active_first",
+ "option": "optional"
+ },
+ {
+ "description": "(int64_t, required) in alter policy",
+ "name": "active_period",
+ "option": "optional"
+ },
+ {
+ "description": "(int64_t, required) in alter policy",
+ "name": "inactive_period",
+ "option": "optional"
+ },
+ {
+ "description": "(int, default -1) maximum iterations in this training run",
+ "name": "max_iter",
+ "option": "optional"
+ },
+ {
+ "description": "(int, default 0) number of iterations over which to warmup lr",
+ "name": "num_iter",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 0) starting multiplier for learning rate",
+ "name": "start_multiplier",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 0) end multiplier for learning rate",
+ "name": "end_multiplier",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 0.5) constant multiplier for learning rate",
+ "name": "multiplier",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 1) start multiplier for learning rate",
+ "name": "multiplier_1",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 1) end multiplier for learning rate",
+ "name": "multiplier_2",
+ "option": "optional"
+ },
+ {
+ "description": "(int array, default empty) number of iterations for each sub learning rate policy in composite policy",
+ "name": "sub_policy_num_iters",
+ "option": "optional"
+ },
+ {
+ "description": "",
+ "name": "m1",
+ "option": "optional"
+ },
+ {
+ "description": "",
+ "name": "n1",
+ "option": "optional"
+ },
+ {
+ "description": "",
+ "name": "m2",
+ "option": "optional"
+ },
+ {
+ "description": "",
+ "name": "n2",
+ "option": "optional"
+ },
+ {
+ "description": "",
+ "name": "m3",
+ "option": "optional"
+ },
+ {
+ "description": "(float, default 0.005) max learning rate",
+ "name": "max_lr",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 0.1",
+ "name": "start_warmup_multiplier",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 10000000",
+ "name": "constant_warmup_num_iter",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 10000000",
+ "name": "linear_warmup_num_iter",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 0.05, part of CompositeCyclicalLRPolicy",
+ "name": "cyclical_max_lr",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 1000000, part of CompositeCyclicalLRPolicy",
+ "name": "cyclical_step_size",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 0.999, part of CompositeCyclicalLRPolicy",
+ "name": "cyclical_decay",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 0.01, part of CompositeCosineLRPolicy",
+ "name": "cosine_min_lr",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 0.05, part of CompositeCosineLRPolicy",
+ "name": "cosine_max_lr",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 50, part of CompositeCosineLRPolicy",
+ "name": "cosine_period",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 1,0, part of CompositeCosineLRPolicy",
+ "name": "cosine_t_mult",
+ "option": "optional"
+ },
+ {
+ "description": "defaults to 0.99, part of CompositeCosineLRPolicy",
+ "name": "cosine_lr_shrink",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "description needed",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "description needed",
+ "name": "output"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LearningRateAdaption",
+ "description": "\n Learning Rate Adaption is an operation that perform one iteration of\n gradient descent based on learning rate:\n lr(k) = lr(k-1) - lr_alpha * df(k-1)/dlr,\n where df(k-1)/dlr is the gradient of objective function f on lr, and\n lr_alpha is a learning rate hyperparameter. It can be prove that\n df(k-1)/dlr equals INNERPRODUCT(grad(k-1), -grad(k-2)), where grad(k-1) is\n the grad of f(k-1) on parameters. When the argument\n \"normalized_lr_adaption\" is false, we simply perform the\n following update:\n lr(k) = lr(k-1) - lr_alpha * INNERPRODUCT(grad(k-1), grad(k-2)).\n If we set \"normalized_lr_adaption\" to be true, we do not directly apply\n INNERPRODUCT(grad(k-1), -grad(k-2)) as the grad. Instead, we perform the\n following update:\n lr(k) = lr(k-1) + lr_alpha * cosineSimilarity(grad(k-1), grad(k-2)).\n",
+ "attributes": [
+ {
+ "description": "the learning rate for performing gradient descent on learning rate lr",
+ "name": "lr_alpha",
+ "option": "optional"
+ },
+ {
+ "description": "whether to apply normalized lr adaption or not",
+ "name": "normalized_lr_adaption",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Learning rate",
+ "name": "lr"
+ },
+ {
+ "description": "Gradient computed",
+ "name": "grad"
+ },
+ {
+ "description": "The effective grad",
+ "name": "effgrad"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Updated learning rate",
+ "name": "output_lr"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsGather",
+ "description": "\nGather items from sparse tensor. Sparse tensor is described by items and\nlengths. This operator gathers items corresponding to lengths at the given\nindices. This deliberately doesn't return lengths of OUTPUTS so that both lists\nand maps can be supported without special cases. If you need lengths tensor for\n OUTPUT, use `Gather`.\n\nExample:\n ITEMS = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n LENGTHS = [0, 2, 3, 1, 4]\n INDICES = [0, 2, 4]\n\n OUTPUT = [2, 3, 4, 6, 7, 8, 9]\n",
+ "inputs": [
+ {
+ "description": "items tensor",
+ "name": "ITEMS"
+ },
+ {
+ "description": "lengths tensor",
+ "name": "LENGTHS"
+ },
+ {
+ "description": "indices into LENGTHS where items should be gathered",
+ "name": "INDICES"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1-D tensor containing gathered items",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsIndicesInGradientMeanGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsIndicesInGradientSumGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsMax",
+ "description": "\nApplies 'Max' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'Max' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nMax computes the element-wise max of the input slices. Operation doesn't change the shape of the individual blocks.\n\n\nThe *LengthsMax* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the maximum value in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [max([2,4]), max([3,1,2]), max([10])] = [4,3,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsMax\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 4. 3. 10.]\n\n```\n\n \n\n\n ",
+ "inputs": [
+ {
+ "description": "Input tensor, slices of which are aggregated.",
+ "name": "DATA"
+ },
+ {
+ "description": "Vector with the same sum of elements as the first dimension of DATA",
+ "name": "LENGTHS"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsMaxWithMainInputAndForwardOutputGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsMean",
+ "description": "\nApplies 'Mean' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'Mean' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nMean computes the element-wise mean of the input slices. Operation doesn't change the shape of the individual blocks.\n\n\nThe *LengthsMean* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the mean value in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [mean([2,4]), mean([3,1,2]), mean([10])] = [3,2,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsMean\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 3. 2. 10.]\n\n```\n\n \n\n\n ",
+ "inputs": [
+ {
+ "description": "Input tensor, slices of which are aggregated.",
+ "name": "DATA"
+ },
+ {
+ "description": "Vector with the same sum of elements as the first dimension of DATA",
+ "name": "LENGTHS"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsMeanGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsPad",
+ "description": "\nGiven DATA tensor of rank r >= 1, and LENGTHS tensor of rank 1, pad each\nsegment in DATA with `value`, so that each segment's length is `target_length`.\nIf will throw, if there is segment of length larger than `target_length`.\n\nExample:\n DATA = [\n [2.3, 3.4],\n [4.5, 5.7],\n [6.8, 7.9],\n ]\n LENGTHS = [0, 1, 1, 1]\n and target_length = 2, padding value = -1.0\n OUTPUT = [\n [-1.0, -1.0],\n [-1.0, -1.0],\n [2.3, 3.4],\n [-1.0, -1.0],\n [4.5, 5.7],\n [-1.0, -1.0],\n [6.8, 7.9],\n [-1.0, -1.0],\n ]\n",
+ "attributes": [
+ {
+ "description": "The value to pad the data",
+ "name": "padding_value",
+ "option": "optional"
+ },
+ {
+ "description": "The target length of each segment",
+ "name": "target_length",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Tensor of rank r >= 1. First dimension must be equal to the size of lengths",
+ "name": "DATA"
+ },
+ {
+ "description": "Tensor of int32 lengths of rank 1",
+ "name": "LENGTHS"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Padded DATA tensor",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsPartition",
+ "description": "\nLengthsPartition splits the input int tensor into multiple ones according to the\nsecond tensor. The first dimension is expected to be the tensor that describes\nlengths of the elements.\n\nTakes the second input and partitions it to shards according to the remainder of\nvalues modulo the number of partitions. It requires the second tensor to be\na 1D-tensor of the integral type. The first tensor should be 1D-tensor of int32\nthat would represent the lengths of the elements in the input. The number of\npartitions is derived as (num_output / num_input).\n\nIf additional inputs are present they must have the same shape as the first\ninput, optionally with extra trailing dimensions. They will be partitioned\naccordingly to the first input.\n\nOptional arg 'pack_first_input' transforms the first tensor values as\nX_ij / num_partitions.\n\nOutputs are ordered as\nX_0_part_0, X_1_part_0, ..., X_N-1_part_0, X_0_part_1, ..., X_N-1_part_K-1\n",
+ "attributes": [
+ {
+ "description": "(int, default 0) If set, the operator transforms the first tensor values as floor(X_ij / num_partitions)",
+ "name": "pack_first_input",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor containing data to be partitioned. The number of input tensors might be greater than 1 but must have the same shape as the previous tensors.",
+ "name": "input"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output Partitions. The number of output tensors has to be a multiple of the number of input tensors.",
+ "name": "partitions"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsRangeFill",
+ "description": "\nThe *LengthsRangeFill* op takes a single input *lengths* and outputs a single tensor *range_sequence*. For each element of *lengths*, the op appends the range(0,lengths) vector to the end of *range_sequence*. For example, if input=[2,4,1], the output would be [0,1,0,1,2,3,0].\n\nGithub Links:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.h\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/filler_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsRangeFill\",\n [\"lengths\"],\n [\"range_sequence\"],\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([2,4,1]).astype(np.int32))\nprint(\"lengths:\\n\", workspace.FetchBlob(\"lengths\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"range_sequence: \\n\", workspace.FetchBlob(\"range_sequence\"))\n\n```\n\n**Result**\n\n```\n\nlengths:\n [2 4 1]\nrange_sequence:\n [0 1 0 1 2 3 0]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "1D tensor of int32 or int64 segment lengths.",
+ "name": "lengths"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D tensor whose size is the sum of *lengths*",
+ "name": "range_sequence"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsSplit",
+ "description": "\nGiven input vector LENGTHS, and input n_split, LengthsSplit returns\na single output vector. It \"splits\" each length into n_split values which add\nup to the original length. It will attempt to do equal splits, and if not possible,\nit orders larger values first. If the n_split is larger than the length, zero\npadding will be applied.\n\ne.g. LENGTHS = [9 4 5]\n n_split = 3\n Y = [3 3 3 2 1 1 2 2 1]\n\ne.g. LENGTHS = [2, 1, 2]\n n_split = 3\n Y = [1 1 0 1 0 0 1 1 0]\n",
+ "attributes": [
+ {
+ "description": "Number of splits for each element in LENGTHS",
+ "name": "n_split",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Mx1 Input tensor denoting INT32 lengths",
+ "name": "LENGTHS"
+ },
+ {
+ "description": "(Optional) Number of splits for each element in LENGTHS (overrides argument)",
+ "name": "n_split"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(M*n_split)x1 Output vector denoting split lengths",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsSum",
+ "description": "\nApplies 'Sum' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'Sum' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nSummation is done element-wise across slices of the input tensor and doesn't change the shape of the individual blocks.\n\n\nThe *LengthsSum* op takes two inputs *DATA* and *LENGTHS*, and produces a single output *OUTPUT*. The op finds the sum in each of the segments of *DATA*, where segments are defined by their lengths.\nFor example, if $DATA = [2,4,3,1,2,10]$ and $LENGTHS = [2,3,1]$ then $OUTPUT = [sum([2,4]), sum([3,1,2]), sum([10])] = [6,6,10]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsSum\",\n [\"DATA\", \"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [ 6. 6. 10.]\n\n```\n\n \n\n\n ",
+ "inputs": [
+ {
+ "description": "Input tensor, slices of which are aggregated.",
+ "name": "DATA"
+ },
+ {
+ "description": "Vector with the same sum of elements as the first dimension of DATA",
+ "name": "LENGTHS"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsSumGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsTile",
+ "description": "\nGiven DATA tensor of rank r >= 1, and LENGTHS tensor of rank 1, duplicate each\nentry of the outer-most dimension of DATA according to LENGTHS, and concatenate\nthem in an output tensor of rank r.\n\nExample:\n DATA = [\n [1.0, 1.2],\n [2.3, 3.4],\n [4.5, 5.7],\n [6.8, 7.9],\n ]\n LENGTHS = [0, 1, 3, 2]\n OUTPUT = [\n [2.3, 3.4],\n [4.5, 5.7],\n [4.5, 5.7],\n [4.5, 5.7],\n [6.8, 7.9],\n [6.8, 7.9],\n ]\n",
+ "inputs": [
+ {
+ "description": "Tensor of rank r >= 1. First dimension must be equal to the size of lengths",
+ "name": "DATA"
+ },
+ {
+ "description": "Tensor of int32 lengths of rank 1",
+ "name": "LENGTHS"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Tensor of rank r",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsToOffsets",
+ "description": "\nGiven a vector of segment lengths, returns a vector of offsets from these lengths,\nwhich will have the same size as the input vector. Output is going to have\nthe same type as input. For long tensors explicit casting from int32 to int64\nmight be necessary prior to this op.\n\nFor example, `[1, 3, 0, 2]` transforms into `[0, 1, 4, 4]`.\n",
+ "inputs": [
+ {
+ "description": "1D tensor of int32 or int64 segment lengths.",
+ "name": "lengths"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D tensor of the same shape and type as `lengths`",
+ "name": "offsets"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsTopK",
+ "description": "\nApply TopK to each segment of the input tensor, where segments are defined by\ntheir LENGTHS, and concatenate them in an output tensor of\nshape=(SIZE(LENGTHs), k). In case there's less than k values in a segment,\nthe output value will be padded by 0, and the corresponding output indices will\nbe padded by -1.\n",
+ "attributes": [
+ {
+ "description": "the number of top values to return for each segment, if the number of values is smaller than k, the values would be padded with 0 and indices would be padded with -1.",
+ "name": "k",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Tensor of rank 1. First dimension must be equal to the sum of lengths",
+ "name": "DATA"
+ },
+ {
+ "description": "Tensor of int32 lengths of rank 1",
+ "name": "LENGTHS"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Output top k elements for each segment, withshape=(SIZE(lengths), k)",
+ "name": "TopKValue"
+ },
+ {
+ "description": "Output indices in DATA corresponding to value in TopKValue",
+ "name": "TopKIndices"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsTopKGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsToRanges",
+ "description": "\nGiven a vector of segment lengths, calculates offsets of each segment and packs\nthem next to the lengths. For the input vector of length N the output is a Nx2\nmatrix with (offset, lengths) packaged for each segment.\n\nFor example, `[1, 3, 0, 2]` transforms into `[[0, 1], [1, 3], [4, 0], [4, 2]]`.\n",
+ "inputs": [
+ {
+ "description": "1D tensor of int32 segment lengths.",
+ "name": "lengths"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "2D tensor of shape len(lengths) X 2 and the same type as `lengths`",
+ "name": "ranges"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsToSegmentIds",
+ "description": "\nGiven a vector of segment lengths (*lengths*) the *LengthsToSegmentIds* op returns a zero-based, consecutive vector of segment ids (*segment_ids*). For example, *lengths=[1, 3, 0, 2]* will produce *segment_ids=[0, 1, 1, 1, 3, 3]*. In general, the inverse operation is *SegmentIdsToLengths*. Notice though that trailing empty sequence lengths can't be properly recovered from segment ids.\n\nGithub Links:\n\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.cc\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/utility_ops.h\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsToSegmentIds\",\n [\"lengths\"],\n [\"segment_ids\"],\n)\n\nworkspace.FeedBlob(\"lengths\", np.array([1, 3, 0, 2]).astype(np.int32))\nprint(\"lengths:\\n\", workspace.FetchBlob(\"lengths\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"segment_ids: \\n\", workspace.FetchBlob(\"segment_ids\"))\n\n```\n\n**Result**\n\n```\n\nlengths:\n [1 3 0 2]\nsegment_ids:\n [0 1 1 1 3 3]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "1D tensor of int32 or int64 segment lengths.",
+ "name": "lengths"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D tensor of length *sum(lengths)*",
+ "name": "segment_ids"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsToShape",
+ "description": "\nThis operator takes a list of $N$ equal integers as input which represent the lengths of $N$ vectors. The output is the calculated shape of the matrix if the $N$ integers were combined into a single matrix.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/utility_ops.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsToShape\",\n [\"X\"],\n [\"Y\"]\n)\n\n// Create X: Sample softmax output for 5-class model\nX = np.array([2,2,2,2,2,2,2,2,2,2])\nprint(\"X:\\n\",X)\n\n// Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.int32))\n\n// Run op\nworkspace.RunOperatorOnce(op)\n\n// Collect Output\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [2 2 2 2 2 2 2 2 2 2]\nY:\n [10 2]\n\n```\n\n \n\n ",
+ "inputs": [
+ {
+ "description": "List, of length $N$, of equal integers representing the lengths of several vectors.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Vector of length 2 describing the dimensions of the data if the $N$ vectors from the input were combined to a single matrix.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsToWeights",
+ "description": "\nSimilar as LengthsToSegmentIds but output vector of segment\nweights derived by lengths. i.e 1/pow(length, power)\n",
+ "attributes": [
+ {
+ "description": "n of 1/pow(length,n) for normalization",
+ "name": "power",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1-D int32_t or int64_t tensor of lengths",
+ "name": "lengths"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1-D float tensor of weights by length",
+ "name": "a vector of weights"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsWeightedSum",
+ "description": "\nApplies 'WeightedSum' to each segment of the input tensor. Segments are defined\nby their *LENGTHS*. *LENGTHS* is a vector that maps each of the slices of\n*DATA* to a particular segment. Values belonging to the same segment are\naggregated together and considered for the 'WeightedSum' operation.\n\nFor example *LENGTHS = [2, 1]* stands for segments *DATA[0..1]* and *DATA[2]*\n\nThe sum of elements in *LENGTHS* must equal the number of elements in the first\ndimension of *DATA*. The length of *OUTPUT* is equal to the number of input\nsegments, i.e. len(*LENGTHS*).\n\nInput slices are first scaled by SCALARS and then summed element-wise. It doesn't change the shape of the individual blocks.\n\n\nThe *LengthsWeightedSum* op takes three inputs *DATA*, *LENGTHS*, and *SCALARS*, and produces a single output *OUTPUT*. The op finds the weighted sum in each of the segments of *DATA*, where segments are defined by their lengths. Before calculating the sums, the input *DATA* is weighted by the contents of *SCALARS*.\nFor example, if $DATA = [2,4,3,1,2,10]$, $SCALARS = [8, 2, 1, 4, 1, 0.6]$, and $LENGTHS = [2,3,1]$, then $OUTPUT = [sum([8*2,2*4]), sum([1*3,4*1,1*2]), sum([0.6*10])] = [24,9,6]$.\n\nGithub Link:\n- https://github.com/caffe2/caffe2/blob/master/caffe2/operators/segment_reduction_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LengthsWeightedSum\",\n [\"DATA\", \"SCALARS\",\"LENGTHS\"],\n [\"OUTPUT\"],\n)\n\nworkspace.FeedBlob(\"DATA\", np.array([2,4,3,1,2,10]).astype(np.float32))\nprint(\"DATA:\\n\", workspace.FetchBlob(\"DATA\"))\n\nworkspace.FeedBlob(\"SCALARS\", np.array([8, 2, 1, 4, 1, 0.6]).astype(np.float32))\nprint(\"SCALARS:\\n\", workspace.FetchBlob(\"SCALARS\"))\n\nworkspace.FeedBlob(\"LENGTHS\", np.array([2,3,1]).astype(np.int32))\nprint(\"LENGTHS:\\n\", workspace.FetchBlob(\"LENGTHS\"))\n\nworkspace.RunOperatorOnce(op)\nprint(\"OUTPUT: \\n\", workspace.FetchBlob(\"OUTPUT\"))\n\n```\n\n**Result**\n\n```\n\nDATA:\n [ 2. 4. 3. 1. 2. 10.]\nSCALARS:\n [8. 2. 1. 4. 1. 0.6]\nLENGTHS:\n [2 3 1]\nOUTPUT:\n [24. 9. 6.]\n\n```\n\n \n\n\n ",
+ "attributes": [
+ {
+ "description": "Produce also gradient for `weights`. For now it's only supported in `Lengths`-based operators",
+ "name": "grad_on_weights",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "Input tensor for the summation",
+ "name": "DATA"
+ },
+ {
+ "description": "Scalar multipliers for the input slices. Must be a vector with the length matching the number of slices",
+ "name": "SCALARS"
+ },
+ {
+ "description": "Vector with the same sum of elements as the first dimension of DATA",
+ "name": "LENGTHS"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Aggregated output tensor. Has the first dimension of len(LENGTHS) ",
+ "name": "OUTPUT"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsWeightedSumGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LengthsWeightedSumWithMainInputGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "Load",
+ "description": "\nThe Load operator loads a set of serialized blobs from a db or multiple dbs. It\ntakes $[0, \\infty)$ number of inputs and $[0, \\infty)$ number of outputs, using\nthe db keys to match the db entries with the outputs.\n\nIf at least one input is passed, then it is assumed that that input blobs are a\nset of DBReaders to load from. Otherwise the `db` or `dbs` argument is used to load\nblobs from one single db or multiple dbs respectively. `db_type` argument is used\nto specify the type of the input db/dbs.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/load_save_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Load\",\n [],\n [\"X\", \"Y\"],\n db=\"test_db\",\n db_type=\"lmdb\"\n)\n\nworkspace.RunOperatorOnce(op)\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "If set to non-zero, save the db directly to the path specified by the `db` arg. If not set (default), prepend the path of the current root folder of the workspace to the path specified by the `db` arg.",
+ "name": "absolute_path",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": "",
+ "description": "Blobs will be prefixed with this when loading. Useful for avoiding collisions with blobs existing in the workspace. The output blob names specified to this op should include this prefix.",
+ "name": "add_prefix",
+ "option": "optional",
+ "type": "string"
+ },
+ {
+ "default": "",
+ "description": "Characters in the provided blob names that match `strip_prefix` will be removed prior to saving. Also, characters that precede `strip_prefix` will be removed. Useful for removing device scope from blob names.",
+ "name": "strip_prefix",
+ "option": "optional",
+ "type": "string"
+ },
+ {
+ "description": "The output path of the db. See the `absolute_path` arg details for options regarding the current root folder of the workspace.",
+ "name": "db",
+ "option": "optional",
+ "type": "string"
+ },
+ {
+ "description": "List of paths to dbs to load blobs from. See the `absolute_path` arg details for options regarding the current root folder of the workspace.",
+ "name": "dbs",
+ "option": "optional",
+ "type": "string[]"
+ },
+ {
+ "description": "(type: string)* Type of db to save (options: \"lmdb\", \"leveldb\", \"minidb\").",
+ "name": "db_type",
+ "option": "optional"
+ },
+ {
+ "default": 0,
+ "description": "If nonzero, the blobs are loaded into the device that is specified in the serialized `BlobProto`. Otherwise, the device will be set as the one that the `Load` operator is being run under.",
+ "name": "keep_device",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": 0,
+ "description": "If nonzero, will load all blobs pointed to by the db to the workspace overwriting/creating blobs as needed.",
+ "name": "load_all",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": false,
+ "description": "If True, will allow not loading all the output blobs specified in the outputs.",
+ "name": "allow_incomplete",
+ "option": "optional",
+ "type": "boolean"
+ },
+ {
+ "description": "If set, used instead of output blob names to specify which blobs in the db shall be loaded. Must be the same length as number of output blobs.",
+ "name": "source_blob_names",
+ "option": "optional",
+ "type": "string[]"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: List(DBReader))* [OPTIONAL] List of DBReaders to load from. Can use this instead of the `db`/`dbs` args.",
+ "name": "X, Y, ..."
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Log",
+ "description": "\nCalculates the natural log of the given input tensor ($ln(x)$), element-wise. This\noperation can be done in an in-place fashion too, by providing the same input\nand output blobs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/log_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Log\",\n [\"X\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X before running op:\", workspace.FetchBlob(\"X\"))\nworkspace.RunOperatorOnce(op)\nprint(\"X after running op:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX before running op:\n[[0.07341351 0.15404125 0.386613 ]\n [0.34090295 0.99727786 0.24141751]\n [0.32016268 0.8724168 0.93515724]]\nX after running op:\n[[-2.6116474 -1.8705349 -0.9503311 ]\n [-1.0761575 -0.00272586 -1.4212275 ]\n [-1.138926 -0.13648799 -0.06704059]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input tensor.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor computed as the natural log of the input tensor computed, element-wise.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LogFatal",
+ "support_level": "default"
+ },
+ {
+ "name": "Logit",
+ "description": "\nElementwise logit transform: logit(x) = log(x / (1 - x)), where x is the\ninput data clampped in (eps, 1-eps).\n",
+ "attributes": [
+ {
+ "description": "small positive epsilon value, the default is 1e-6.",
+ "name": "eps (optional)",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "input float tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "output float tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LogitGradient",
+ "attributes": [
+ {
+ "description": "small positive epsilon value, the default is 1e-6.",
+ "name": "eps",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "input float tensor",
+ "name": "X"
+ },
+ {
+ "description": "input float tensor",
+ "name": "dY"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "output float tensor",
+ "name": "dX"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LongIndexCreate",
+ "description": "\nCreates a dictionary that maps int64 keys to consecutive integers\nfrom 1 to max_elements. Zero is reserved for unknown keys.\n",
+ "attributes": [
+ {
+ "description": "Max number of elements, including the zero entry.",
+ "name": "max_elements",
+ "option": "optional"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Pointer to an Index instance.",
+ "name": "handler"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LpNorm",
+ "description": "\nThis op computes the $L_p$ norm of the one dimensional input tensor $X$, and outputs a one dimensional output tensor $Y$. Here, the $L_p$ norm is calculated as\n\n$$L_p(\\mathbf{x}) = \\sum_i x_i^p$$\n\nThis op supports $p$ values of 1 or 2. If the average argument is set, the norm is calculated as Lp_averaged_norm(x) is defined as Lp_averaged_norm(x) = LpNorm(x) / size(x).\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lpnorm_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lpnorm_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LpNorm\",\n [\"X\"],\n [\"Y\"],\n p=2\n)\nX = np.array([5., 2.])\nprint(\"X:\\n\",X)\n\n// Feed X into workspace\nworkspace.FeedBlob(\"X\", X.astype(np.float32))\n\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [5. 2.]\nY:\n [29.]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 2,
+ "description": "Order of the norm in p-norm.",
+ "name": "p",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": false,
+ "description": "Whether we calculate norm or averaged_norm.The Lp_averaged_norm(x) is defined as Lp_averaged_norm(x) = LpNorm(x) / size(x)",
+ "name": "average",
+ "option": "optional",
+ "type": "boolean"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1D Input tensor of data to be operated on.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D output tensor",
+ "name": "Z"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LpNormGradient",
+ "description": "\nGiven one input float tensor X, derivative dout, and produces one output\nfloat tensor dX. dX is the derivative of the Lp norm of tensor X, computed as\ndx = d(sum over |x^p|)/dx, in which p is either 1 or 2(currently only\nsupports l1 and l2 norm) determined by the argument p.\n",
+ "attributes": [
+ {
+ "description": "Order of the norm in p-norm",
+ "name": "p",
+ "option": "optional"
+ },
+ {
+ "description": "whehther we calculate norm or averaged_norm.The Lp_averaged_norm(x) is defined asLp_averaged_normgradient(x) = LpNormGradient(x) / size(x)",
+ "name": "average",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "1D input tensor",
+ "name": "X"
+ },
+ {
+ "description": "1D input tensor",
+ "name": "dout"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "1D output tensor",
+ "name": "dx"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LpPool",
+ "description": "\n`LpPool` consumes an input blob and applies max pooling across the the blob according to kernel sizes, stride sizes, pad lengths and dilation. $L_p$ pooling consists of taking the $L_p$ norm of a subset of the input tensor according to the kernel size and downsampling the data into the output blob for further processing.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the output blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/lp_pool_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LpPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n p=2.0\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n\n```\n\n**Result**\n\n```\n\nX:\n [[[[-1.1113514 -1.1173418 -0.1504435 0.1327146 -1.2221841 -0.5654315 ]\n [-1.9209646 -0.04675794 0.8604731 1.2042469 0.28154245 0.38656202]\n [-0.8772837 -0.03264008 0.26222762 0.28526652 0.321102 -2.5891325 ]\n [-0.9248281 1.440776 -0.56832 -0.6017927 1.2262512 -2.1443934 ]\n [ 0.5194415 -1.6858683 0.45221648 0.65029615 -0.8574544 0.8121054 ]\n [ 0.25902653 0.4934758 0.49870652 -0.48134378 -0.9178449 -0.07626943]]]]\n\nY:\n [[[[2.4851248 1.49361 1.4290358]\n [1.9240153 0.9139378 3.5928857]\n [1.8500228 1.0525136 1.4976646]]]]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "description": "(*float*): type of $L_p$ norm to use (default=2.0)",
+ "name": "p",
+ "option": "optional"
+ },
+ {
+ "description": "(*int*): the size of the window to take a max over",
+ "name": "kernel",
+ "option": "optional"
+ },
+ {
+ "description": "(*int*): the stride of the window",
+ "name": "stride",
+ "option": "optional"
+ },
+ {
+ "description": "(*int*): implicit zero padding to be added on both sides",
+ "name": "pad",
+ "option": "optional"
+ },
+ {
+ "description": "(*int*): parameter that controls the stride of elements in the window",
+ "name": "dilation",
+ "option": "optional"
+ },
+ {
+ "description": "(*string*): order of blob dimensions (default=\"NCHW\")",
+ "name": "order",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "(*Tensor``*): input tensor",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "(*Tensor``*): output tensor",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LpPoolGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LRN",
+ "category": "Normalization",
+ "description": "\n\n`LRN` applies Local Response Normalization to an input blob. This operation performs\na kind of \"lateral inhibition\" by normalizing over local input regions, where\nnormalization is applied across channels. This operator is typically used to\nnormalize an unbounded activation (such as ReLU). The output shape is the same as\nthe input shape. The `brew` module has a wrapper for this operator for use in a\n`ModelHelper` object.\n\nThe formula for LRN is as follows:\n\n$$b_{c} = a_{c}(bias + \\frac{\\alpha}{n}\\sum_{c'=max(0,c-n/2)}^{min(N-1,c+n/2)} a_{c'}^2 )^{-\\beta}$$\n\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/local_response_normalization_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/local_response_normalization_op.cc\n\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\"LRN\",\n [\"X\"],\n [\"Y\", \"Y_scale\"],\n size=11,\n alpha=0.001,\n beta=0.5,\n bias=2.0,\n order=\"NHWC\"\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 6, 6, 1).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\nprint(\"Y_scale:\\n\", workspace.FetchBlob(\"Y_scale\"))\n```\n\n**Result**\n\n```\nX:\n [[[[ 0.72985137]\n [-0.3753357 ]\n [ 2.7344604 ]\n [-0.5937792 ]\n [ 0.38440478]\n [-2.1659644 ]]\n\n [[-0.92846817]\n [-0.9996144 ]\n [ 0.212943 ]\n [-1.968045 ]\n [-0.77839696]\n [ 0.45492038]]\n\n [[-0.11263168]\n [ 1.9901097 ]\n [ 0.19275683]\n [ 0.15630436]\n [ 0.7536298 ]\n [-0.77339894]]\n\n [[ 0.8353551 ]\n [-0.7784452 ]\n [ 1.779317 ]\n [ 0.22421335]\n [ 1.3846219 ]\n [-3.0546608 ]]\n\n [[ 0.09977621]\n [ 2.2071757 ]\n [ 0.79971045]\n [ 3.563886 ]\n [-0.7169287 ]\n [ 0.77170426]]\n\n [[-1.4296649 ]\n [ 0.19181213]\n [ 0.45961624]\n [-1.0201577 ]\n [ 0.62854475]\n [-0.6395456 ]]]]\n\nY:\n [[[[ 0.5160766 ]\n [-0.26540157]\n [ 1.9332271 ]\n [-0.41986194]\n [ 0.27181432]\n [-1.5314047 ]]\n\n [[-0.6565133 ]\n [-0.7068181 ]\n [ 0.15057328]\n [-1.3914955 ]\n [-0.5504022 ]\n [ 0.32167578]]\n\n [[-0.0796426 ]\n [ 1.4070934 ]\n [ 0.13629955]\n [ 0.11052381]\n [ 0.53288984]\n [-0.5468682 ]]\n\n [[ 0.5906759 ]\n [-0.5504363 ]\n [ 1.2580767 ]\n [ 0.1585426 ]\n [ 0.9790328 ]\n [-2.1595135 ]]\n\n [[ 0.07055242]\n [ 1.5605361 ]\n [ 0.5654725 ]\n [ 2.5193207 ]\n [-0.50693923]\n [ 0.54567 ]]\n\n [[-1.0108787 ]\n [ 0.13563155]\n [ 0.3249962 ]\n [-0.72134334]\n [ 0.44444424]\n [-0.45222285]]]]\nY_scale:\n [[[[2.0000484]\n [2.0000129]\n [2.0006797]\n [2.000032 ]\n [2.0000134]\n [2.0004265]]\n\n [[2.0000784]\n [2.0000908]\n [2.000004 ]\n [2.0003521]\n [2.000055 ]\n [2.0000188]]\n\n [[2.0000012]\n [2.00036 ]\n [2.0000033]\n [2.0000021]\n [2.0000517]\n [2.0000544]]\n\n [[2.0000634]\n [2.000055 ]\n [2.0002878]\n [2.0000045]\n [2.0001743]\n [2.0008483]]\n\n [[2.000001 ]\n [2.000443 ]\n [2.0000582]\n [2.0011547]\n [2.0000467]\n [2.0000541]]\n\n [[2.0001857]\n [2.0000033]\n [2.0000193]\n [2.0000947]\n [2.000036 ]\n [2.0000372]]]]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Amount of neighboring channels to sum over for normalization",
+ "name": "size",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": 0.0,
+ "description": "Multiplicative (scaling) factor.",
+ "name": "alpha",
+ "option": "optional",
+ "type": "float32"
+ },
+ {
+ "default": 0.0,
+ "description": "Exponent.",
+ "name": "beta",
+ "option": "optional",
+ "type": "float32"
+ },
+ {
+ "default": 1.0,
+ "description": "Additive factor.",
+ "name": "bias",
+ "option": "optional",
+ "type": "float32"
+ },
+ {
+ "default": 0,
+ "description": "Order of blob dimensions.",
+ "name": "order",
+ "option": "optional",
+ "type": "float32"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor (ReLU output).",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor.",
+ "name": "Y"
+ },
+ {
+ "description": "*(type: Tensor``)* Output scale.",
+ "name": "Y_scale"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LRNGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "LSTMUnit",
+ "description": "\nLSTMUnit computes the activations of a standard LSTM (without peephole\nconnections), in a sequence-length aware fashion.\n\nConcretely, given the (fused) inputs X (TxNxD), the previous cell\nstate (NxD), and the sequence lengths (N), computes the LSTM\nactivations, avoiding computation if the input is invalid (as in, the\nvalue at X{t][n] >= seqLengths[n].\n\n",
+ "attributes": [
+ {
+ "description": "Bias term to add in while calculating forget gate",
+ "name": "forget_bias",
+ "option": "optional"
+ },
+ {
+ "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.",
+ "name": "sequence_lengths",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LSTMUnitGradient",
+ "attributes": [
+ {
+ "description": "When false, the sequence lengths input is left out, and all following inputs are shifted left by one.",
+ "name": "sequence_lengths",
+ "option": "optional"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "LT",
+ "description": "\nPerforms element-wise less than comparison **<** (with limited broadcast support).\n\n\nIf necessary the right-hand-side argument will be broadcasted to match the\nshape of left-hand-side argument. When broadcasting is specified, the second\ntensor can either be of size 1 (a scalar value), or having its shape as a\ncontiguous subset of the first tensor's shape. The starting of the mutually\nequal shape is specified by the argument \"axis\", and if it is not set, suffix\nmatching is assumed. 1-dim expansion doesn't work yet.\n\nFor example, the following tensor shapes are supported (with broadcast=1):\n```\n shape(A) = (2, 3, 4, 5), shape(B) = (,), i.e. B is a scalar\n shape(A) = (2, 3, 4, 5), shape(B) = (5,)\n shape(A) = (2, 3, 4, 5), shape(B) = (4, 5)\n shape(A) = (2, 3, 4, 5), shape(B) = (3, 4), with axis=1\n shape(A) = (2, 3, 4, 5), shape(B) = (2), with axis=0\n```\nArgument `broadcast=1` needs to be passed to enable broadcasting.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/elementwise_ops_schema.cc\n\n\n\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"LT\",\n [\"A\", \"B\"],\n [\"C\"],\n)\n\nworkspace.FeedBlob(\"A\", np.array([1, 5, 2, 9, 12, 3]))\nworkspace.FeedBlob(\"B\", np.array([1, 3, 4, 9, 12, 8]))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"C:\", workspace.FetchBlob(\"C\"))\n\n```\n\n**Result**\n\n```\n\nA: [ 1 5 2 9 12 3]\nB: [ 1 3 4 9 12 8]\nC: [False False True False False True]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "description": "Pass 1 to enable broadcasting.",
+ "name": "broadcast",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": -1,
+ "description": "Axis to concatenate on. If set, defines the broadcast dimensions.",
+ "name": "axis",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* First operand, should share the type with the second operand.",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* Second operand. With broadcasting can be of smaller size than `A`. If broadcasting is disabled it should be of the same size.",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with same dimensions as `A`.",
+ "name": "C"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MakeTwoClass",
+ "description": "\nGiven a vector of probabilities, this operator transforms this into a 2-column\n matrix with complimentary probabilities for binary classification. In explicit\n terms, given the vector X, the output Y is vstack(1 - X, X).\n ",
+ "inputs": [
+ {
+ "description": "Input vector of probabilities",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "2-column matrix with complimentary probabilities of X for binary classification",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MakeTwoClassGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "MapToKeyValue",
+ "description": "Convert a map blob into key and value blob pairs",
+ "inputs": [
+ {
+ "description": "Blob reference to the map",
+ "name": "map blob"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Blob reference to the key",
+ "name": "key blob"
+ },
+ {
+ "description": "Blob reference to the value",
+ "name": "value blob"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MarginRankingCriterion",
+ "description": "\nMarginRankingCriterion takes two input data X1 (Tensor),\nX2 (Tensor), and label Y (Tensor) to produce the\nloss (Tensor) where the loss function,\nloss(X1, X2, Y) = max(0, -Y * (X1 - X2) + margin), is applied to\nthe tensor elementwise.\n\nIf y == 1 then it assumed the first input should be ranked higher\n(have a larger value) than the second input, and vice-versa for\ny == -1.\n",
+ "attributes": [
+ {
+ "description": "The margin value as a float. Default is 1.0.",
+ "name": "margin",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "The left input vector as a 1-dim TensorCPU.",
+ "name": "X1"
+ },
+ {
+ "description": "The right input vector as a 1-dim TensorCPU.",
+ "name": "X2"
+ },
+ {
+ "description": "The label as a 1-dim TensorCPU with int value of 1 or -1.",
+ "name": "Y"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "The output loss with the same dimensionality as X1.",
+ "name": "loss"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MarginRankingCriterionGradient",
+ "description": "\nMarginRankingCriterionGradient takes both X1, X2, Y and dY and\nuses them to update dX1, and dX2 according to the chain rule\nand derivatives of the loss function.\n",
+ "support_level": "default"
+ },
+ {
+ "name": "MatMul",
+ "description": "\nMatrix multiplication $Y = A * B$, where `A` has size (M x K), `B` has size\n(K x N), and `Y` will have a size (M x N). To transpose `A` or `B` before\nmultiplication, pass 1 to the `trans_a` and/or `trans_b` arguments, which\nseparate the first and second dimensions of the respective matrices using\n`axis_a` and `axis_b`.\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/matmul_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MatMul\",\n [\"A\", \"B\"],\n [\"Y\"],\n)\n\nworkspace.FeedBlob(\"A\", np.random.randint(10, size=(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"B\", np.random.randint(10, size=(3,3)).astype(np.float32))\nprint(\"A:\", workspace.FetchBlob(\"A\"))\nprint(\"B:\", workspace.FetchBlob(\"B\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nA: [[1. 8. 3.]\n [6. 4. 4.]\n [5. 4. 7.]]\nB: [[4. 0. 3.]\n [3. 1. 1.]\n [8. 5. 8.]]\nY: [[52. 23. 35.]\n [68. 24. 54.]\n [88. 39. 75.]]\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 1,
+ "description": "Exclusive axis that divides the first and second dimension of matrix `A`.",
+ "name": "axis_a",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": 1,
+ "description": "Exclusive axis that divides the first and second dimension of matrix `B`.",
+ "name": "axis_b",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": 0,
+ "description": "Pass 1 to transpose `A` before multiplication and after the dimension adjustment using `axis_a`.",
+ "name": "trans_a",
+ "option": "optional",
+ "type": "int64"
+ },
+ {
+ "default": 0,
+ "description": "Pass 1 to transpose `B` before multiplication and after the dimension adjustment using `axis_b`.",
+ "name": "trans_b",
+ "option": "optional",
+ "type": "int64"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* 2D matrix of size (M x K).",
+ "name": "A"
+ },
+ {
+ "description": "*(type: Tensor``)* 2D matrix of size (K x N).",
+ "name": "B"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* 2D matrix of size (M x N).",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "Max",
+ "description": "\nElement-wise max of an arbitrary number of input tensors. This operation can be\nperformed in-place, by using the first input blob as the output blob. All inputs\nmust have the same shape and data type, and the output will have the same shape\nas the inputs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/minmax_ops.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Max\",\n [\"X\", \"Y\", \"Z\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Z\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Max:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[0.4496477 0.07061381 0.7139333 ]\n [0.83203 0.05970785 0.72786295]\n [0.75988126 0.04601283 0.32820013]]\nY:\n[[0.05683139 0.16872478 0.671098 ]\n [0.70739156 0.09878621 0.03416285]\n [0.34087983 0.94986707 0.67263436]]\nZ:\n[[0.48051122 0.07141234 0.85264146]\n [0.77086854 0.22082241 0.13154659]\n [0.42401117 0.995431 0.4263775 ]]\nMax:\n[[0.48051122 0.16872478 0.85264146]\n [0.83203 0.22082241 0.72786295]\n [0.75988126 0.995431 0.67263436]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* List of input tensors with the same shape.",
+ "name": "X, Y, ..."
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with same dimensions as input(s).Contains the maximum valued element at each location.",
+ "name": "M"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MaxGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "MaxPool",
+ "category": "Pool",
+ "description": "MaxPool \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n \n\n",
+ "attributes": [
+ {
+ "default": 0,
+ "name": "order"
+ },
+ {
+ "default": 0,
+ "name": "pad"
+ },
+ {
+ "name": "cudnn_exhaustive_search",
+ "type": "boolean",
+ "visible": false
+ }
+ ],
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output data tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MaxPool1D",
+ "description": "MaxPool1D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output data tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MaxPool1DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "MaxPool2D",
+ "description": "MaxPool2D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output data tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MaxPool2DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "MaxPool3D",
+ "description": "MaxPool3D \nconsumes an input blob and applies max pooling across the the blob according to\nkernel sizes, stride sizes, pad lengths and dilation. Max pooling consists of\ntaking the maximum value of a subset of the input tensor according to the kernel\nsize and downsampling the data into the output blob for further processing. The\n`brew` module has a wrapper for this operator for use in a `ModelHelper` object.\n\nPooling layers reduce the spatial dimensionality of the input blob. Each of the\noutput blob's dimensions will reduce according to:\n\n$$dim_{out}=\\frac{dim_{in}-kernel+2*pad}{stride}+1$$\n\nGithub Links:\n\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.h\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/pool_op.cc\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/conv_pool_op_base.h\n\n\n\n Example
\n\n**Code**\n\n```\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"MaxPool\",\n [\"X\"],\n [\"Y\"],\n kernel=2,\n stride=2,\n)\n\nworkspace.FeedBlob(\"X\", np.random.randn(1, 1, 6, 6).astype(np.float32)) // NCHW\nprint(\"X:\\n\", workspace.FetchBlob(\"X\"), \"\\n\")\nworkspace.RunOperatorOnce(op)\nprint(\"Y:\\n\", workspace.FetchBlob(\"Y\"))\n```\n\n**Result**\n\n```\nX:\n [[[[-2.8534958e-01 -1.7719941e+00 -8.2277227e-04 1.1088650e+00\n -2.1476576e+00 -3.5070452e-01]\n [-9.0058845e-01 -3.0070004e-01 -1.7907504e+00 -7.1746534e-01\n 1.2798511e+00 -3.2214901e-01]\n [ 1.5806322e+00 1.6845188e+00 -2.6633200e-01 -3.8576153e-01\n -9.6424848e-02 -3.9696163e-01]\n [ 1.2572408e-01 6.3612902e-01 -3.9554062e-01 -6.9735396e-01\n -9.1898698e-01 -1.9609968e-01]\n [-1.1587460e+00 2.4605224e+00 -1.5497679e+00 1.3020347e-01\n -8.1293899e-01 -7.8803545e-01]\n [ 1.4323474e+00 1.3618395e+00 9.8975077e-02 -1.1307785e-01\n 7.2035044e-01 2.7642491e-01]]]]\n\nY:\n [[[[-0.28534958 1.108865 1.2798511 ]\n [ 1.6845188 -0.266332 -0.09642485]\n [ 2.4605224 0.13020347 0.72035044]]]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* Input data tensor of shape NCHW or NHWC.",
+ "name": "X"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output data tensor.",
+ "name": "Y"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MaxPool3DGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "MaxPoolGradient",
+ "support_level": "default"
+ },
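(Editorial aside, not part of the metadata file.) The MaxPool entries above all document the same output-size formula, $dim_{out}=\frac{dim_{in}-kernel+2*pad}{stride}+1$. The minimal Python sketch below checks it against the 6x6, kernel=2, stride=2 example embedded in the descriptions; `pooled_dim` is a hypothetical helper written for illustration, not part of the Caffe2 API.

```python
# Hypothetical helper (not a Caffe2 function) illustrating the shared
# output-size formula: dim_out = (dim_in - kernel + 2*pad) / stride + 1.
def pooled_dim(dim_in, kernel, stride=1, pad=0):
    # Floor division mirrors the integer output sizes pooling produces.
    return (dim_in - kernel + 2 * pad) // stride + 1

# The embedded example pools a 6x6 input with kernel=2, stride=2, pad=0,
# and indeed prints a 3x3 output Y:
assert pooled_dim(6, kernel=2, stride=2) == 3
```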
+ {
+ "name": "Mean",
+ "description": "\nElement-wise mean of an arbitrary number of input tensors. This operation can be\nperformed in-place, by using the first input blob as the output blob. All inputs\nmust have the same shape and data type, and the output will have the same shape\nas the inputs.\n\nGithub Link:\n- https://github.com/pytorch/pytorch/blob/master/caffe2/operators/mean_op.cc\n\n\n\n Example
\n\n**Code**\n\n```\n\nworkspace.ResetWorkspace()\n\nop = core.CreateOperator(\n \"Mean\",\n [\"X\", \"Y\", \"Z\"],\n [\"X\"],\n)\n\nworkspace.FeedBlob(\"X\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Y\", (np.random.rand(3,3)).astype(np.float32))\nworkspace.FeedBlob(\"Z\", (np.random.rand(3,3)).astype(np.float32))\nprint(\"X:\", workspace.FetchBlob(\"X\"))\nprint(\"Y:\", workspace.FetchBlob(\"Y\"))\nprint(\"Z:\", workspace.FetchBlob(\"Z\"))\nworkspace.RunOperatorOnce(op)\nprint(\"Mean:\", workspace.FetchBlob(\"X\"))\n\n```\n\n**Result**\n\n```\n\nX:\n[[0.6035237 0.5305746 0.6298913 ]\n [0.9169737 0.01280353 0.16286302]\n [0.6017664 0.9946255 0.05128575]]\nY:\n[[0.07544111 0.45371833 0.08460239]\n [0.9708728 0.7422064 0.7933344 ]\n [0.97671497 0.3411384 0.73818344]]\nZ:\n[[0.08837954 0.90187573 0.46734726]\n [0.6308827 0.8719029 0.39888734]\n [0.90059936 0.92883426 0.5695987 ]]\nMean:\n[[0.25578147 0.6287229 0.39394698]\n [0.8395764 0.5423043 0.45169494]\n [0.8263602 0.75486606 0.45302266]]\n\n```\n\n \n\n",
+ "inputs": [
+ {
+ "description": "*(type: Tensor``)* List of input tensors with the same shape.",
+ "name": "X, Y, ..."
+ }
+ ],
+ "outputs": [
+ {
+ "description": "*(type: Tensor``)* Output tensor with the same dimensions as inputs. Contains the mean values of the input tensors calculated element-wise.",
+ "name": "M"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MeanGradient",
+ "support_level": "default"
+ },
+ {
+ "name": "MergeDenseFeatureTensors",
+ "description": "Merge given multi-feature dense tensors into one multi-feature tensor.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n",
+ "attributes": [
+ {
+ "description": "feature ids",
+ "name": "feature_ids",
+ "option": "optional"
+ }
+ ],
+ "inputs": [
+ {
+ "description": "",
+ "name": "in1"
+ },
+ {
+ "description": ".presence",
+ "name": "in1_presence"
+ }
+ ],
+ "outputs": [
+ {
+ "description": ".lengths",
+ "name": "out_lengths"
+ },
+ {
+ "description": ".keys",
+ "name": "out_keys"
+ },
+ {
+ "description": ".values",
+ "name": "out_values"
+ }
+ ],
+ "support_level": "default"
+ },
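(Editorial aside, not part of the metadata file.) To make the single- vs. multi-feature representation described above concrete, here is a hedged sketch of what MergeDenseFeatureTensors' scalar-feature outputs could look like for a batch of two examples. The feature id 42 and all values are invented for illustration; this is a reading of the representation text, not output captured from the operator.

```python
import numpy as np

# One dense single-feature input for a batch of two examples, plus its
# presence flags (example 1 is marked absent, so it drops out of the merge).
in1 = np.array([1.5, 0.0], dtype=np.float32)
in1_presence = np.array([True, False])

# Expected multi-feature scalar outputs under the representation above:
# .lengths counts present features per example, .keys holds feature ids
# (taken from the feature_ids attribute), .values packs the present values.
out_lengths = np.array([1, 0], dtype=np.int32)  # example 0: one feature; example 1: none
out_keys = np.array([42], dtype=np.int64)       # made-up feature id
out_values = np.array([1.5], dtype=np.float32)
```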
+ {
+ "name": "MergeDim",
+ "description": "\nMerge first two dimensions in a single dimension with size dim(0) * dim(1).\n",
+ "inputs": [
+ {
+ "description": "An input tensor.",
+ "name": "data"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Reshaped tensor.",
+ "name": "reshaped"
+ }
+ ],
+ "support_level": "default"
+ },
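(Editorial aside, not part of the metadata file.) As a quick sanity check of the MergeDim contract, the sketch below, written in the style of the examples embedded above and assuming a standard Caffe2 Python install, fuses the first two dimensions of a (2, 3, 4) tensor into (6, 4).

```python
import numpy as np
from caffe2.python import core, workspace

workspace.ResetWorkspace()
op = core.CreateOperator("MergeDim", ["data"], ["reshaped"])
workspace.FeedBlob("data", np.zeros((2, 3, 4), dtype=np.float32))
workspace.RunOperatorOnce(op)

# dim(0) * dim(1) = 2 * 3 = 6, so the merged tensor is shaped (6, 4).
print(workspace.FetchBlob("reshaped").shape)  # expected: (6, 4)
```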
+ {
+ "name": "MergeIdLists",
+ "description": "\nMergeIdLists: Merge multiple ID_LISTs into a single ID_LIST.\n\nAn ID_LIST is a list of IDs (may be ints, often longs) that represents a single\nfeature. As described in https://caffe2.ai/docs/sparse-operations.html, a batch\nof ID_LIST examples is represented as a pair of lengths and values where the\n`lengths` (int32) segment the `values` or ids (int32/int64) into examples.\n\nGiven multiple inputs of the form lengths_0, values_0, lengths_1, values_1, ...\nwhich correspond to lengths and values of ID_LISTs of different features, this\noperator produces a merged ID_LIST that combines the ID_LIST features. The\nfinal merged output is described by a lengths and values vector.\n\nWARNING: The merge makes no guarantee about the relative order of ID_LISTs\nwithin a batch. This can be an issue if ID_LIST are order sensitive.\n",
+ "inputs": [
+ {
+ "description": "Lengths of the ID_LISTs batch for first feature",
+ "name": "lengths_0"
+ },
+ {
+ "description": "Values of the ID_LISTs batch for first feature",
+ "name": "values_0"
+ }
+ ],
+ "outputs": [
+ {
+ "description": "Lengths of the merged ID_LISTs batch",
+ "name": "merged_lengths"
+ },
+ {
+ "description": "Values of the merged ID_LISTs batch",
+ "name": "merged_values"
+ }
+ ],
+ "support_level": "default"
+ },
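(Editorial aside, not part of the metadata file.) A hedged illustration of the lengths/values pairing MergeIdLists consumes, for two features over a batch of two examples. All ids are invented, and because the operator makes no ordering guarantee within a batch, the merged values shown in the comments are only one possible ordering.

```python
import numpy as np

# Feature 0: example 0 has ids [10, 11]; example 1 has [12].
lengths_0 = np.array([2, 1], dtype=np.int32)
values_0 = np.array([10, 11, 12], dtype=np.int64)
# Feature 1: example 0 has [20]; example 1 has [21, 22].
lengths_1 = np.array([1, 2], dtype=np.int32)
values_1 = np.array([20, 21, 22], dtype=np.int64)

# After merging, each example keeps all of its ids across both features:
# example 0 -> {10, 11, 20}, example 1 -> {12, 21, 22}, so
# merged_lengths == [3, 3] and merged_values holds those six ids in some
# order, e.g. [10, 11, 20, 12, 21, 22] (relative order is not guaranteed).
merged_lengths = np.array([3, 3], dtype=np.int32)
```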
+ {
+ "name": "MergeMultiListFeatureTensors",
+ "description": "Merge given multi-feature tensors with list features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n",
+ "inputs": [
+ {
+ "description": ".lengths",
+ "name": "in1_lengths"
+ },
+ {
+ "description": ".keys",
+ "name": "in1_keys"
+ },
+ {
+ "description": ".values.lengths",
+ "name": "in1_values_lengths"
+ },
+ {
+ "description": ".values.values",
+ "name": "in1_values_values"
+ }
+ ],
+ "outputs": [
+ {
+ "description": ".lengths",
+ "name": "out_lengths"
+ },
+ {
+ "description": ".keys",
+ "name": "out_keys"
+ },
+ {
+ "description": ".values.lengths",
+ "name": "out_values_lengths"
+ },
+ {
+ "description": ".values.values",
+ "name": "out_values_values"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MergeMultiListFeatureTensorsGradient",
+ "description": "Explode given multi-feature tensors with list features into many.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n",
+ "inputs": [
+ {
+ "description": ".lengths",
+ "name": "in1_lengths"
+ },
+ {
+ "description": ".values.lengths",
+ "name": "in1_values_lengths"
+ },
+ {
+ "description": ".values.values_grad",
+ "name": "out_values_values_grad"
+ }
+ ],
+ "outputs": [
+ {
+ "description": ".values.values_grad",
+ "name": "in1_values_values_grad"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MergeMultiMapFeatureTensors",
+ "description": "Merge given multi-feature tensors with map features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n",
+ "inputs": [
+ {
+ "description": ".lengths",
+ "name": "in1_lengths"
+ },
+ {
+ "description": ".keys",
+ "name": "in1_keys"
+ },
+ {
+ "description": ".values.lengths",
+ "name": "in1_values_lengths"
+ },
+ {
+ "description": ".values.keys",
+ "name": "in1_values_keys"
+ },
+ {
+ "description": ".values.values",
+ "name": "in1_values_values"
+ }
+ ],
+ "outputs": [
+ {
+ "description": ".lengths",
+ "name": "out_lengths"
+ },
+ {
+ "description": ".keys",
+ "name": "out_keys"
+ },
+ {
+ "description": ".values_lengths",
+ "name": "out_values_lengths"
+ },
+ {
+ "description": ".values.keys",
+ "name": "out_values_keys"
+ },
+ {
+ "description": ".values.values",
+ "name": "out_values_values"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MergeMultiMapFeatureTensorsGradient",
+ "description": "Explode given multi-feature tensors with map features into many.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.keys K\n .values.values V\n\n You can read more about representing batches of lists and maps here:\n https://our.intern.facebook.com/intern/dex/caffe2/sparse-operations/\n",
+ "inputs": [
+ {
+ "description": ".lengths",
+ "name": "in1_lengths"
+ },
+ {
+ "description": ".values.lengths",
+ "name": "in1_values_lengths"
+ },
+ {
+ "description": ".values.values_grad",
+ "name": "out_values_values_grad"
+ }
+ ],
+ "outputs": [
+ {
+ "description": ".values.values_grad",
+ "name": "in1_values_values_grad"
+ }
+ ],
+ "support_level": "default"
+ },
+ {
+ "name": "MergeMultiScalarFeatureTensors",
+ "description": "Merge given multi-feature tensors with scalar features into one.\n Single-feature representation:\n - scalar features:\n T\n - list features:\n .lengths int32\n .values T\n - map features:\n .lengths int32\n .keys K\n .values V\n\n Missing values are set to zero, and value presence flag is set accordingly:\n .presence bool\n\n Multi-feature representation:\n - scalar features:\n .lengths int32\n .keys int64\n .values T\n - list features:\n .lengths int32\n .keys int64\n .values.lengths int32\n .values.values T\n - map features:\n .lengths int32\n