Commit 85f37d4

[Tests/Deps] Update qonnx commit and update tests

auphelia committed Jul 5, 2023
1 parent 14192c6 · commit 85f37d4
Showing 6 changed files with 56 additions and 98 deletions.

fetch-repos.sh (2 changes: 1 addition & 1 deletion)

@@ -27,7 +27,7 @@
 # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-QONNX_COMMIT="6ca8f8e0af84e49facac5cdc34735eaf6e938300"
+QONNX_COMMIT="0aec35a16948155e81c1640b71650206e733db3e"
 FINN_EXP_COMMIT="0aa7e1c44b20cf085b6fe42cff360f0a832afd2c"
 BREVITAS_COMMIT="9bb26bf2798de210a267d1e4aed4c20087e0e8a5"
 PYVERILATOR_COMMIT="766e457465f5c0dd315490d7b9cc5d74f9a76f4f"

src/finn/transformation/qonnx/infer_quant_avg_pool_2d.py (3 changes: 2 additions & 1 deletion)

@@ -165,7 +165,8 @@ def apply(self, model):

         # Trunc node
         rounding_mode = get_by_name(t_node.attribute, "rounding_mode")
-        if rounding_mode is None or rounding_mode.s != b"FLOOR":
+        normalized_mode_string = rounding_mode.s.upper()
+        if rounding_mode is None or normalized_mode_string != b"FLOOR":
             raise ValueError(
                 "The Trunc node must have the rounding_mode " "set to 'FLOOR'."
             )
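Aside: read in isolation, the check this hunk builds operates on ONNX attribute objects. Below is a minimal sketch, not FINN source, assuming an AttributeProto-style object whose string field s holds bytes (so b"floor".upper() == b"FLOOR"); the sketch guards against a missing attribute before dereferencing it:

    def rounding_mode_is_floor(rounding_mode):
        # get_by_name returns None when the attribute is absent
        if rounding_mode is None:
            return False
        # AttributeProto.s holds bytes; bytes.upper() maps b"floor" to b"FLOOR"
        return rounding_mode.s.upper() == b"FLOOR"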

tests/end2end/test_end2end_cybsec_mlp.py (94 changes: 33 additions & 61 deletions)

@@ -37,9 +37,8 @@
 import torch
 import torch.nn as nn
 from brevitas.core.quant import QuantType
-from brevitas.export import export_finn_onnx, export_qonnx
+from brevitas.export import export_qonnx
 from brevitas.nn import QuantIdentity, QuantLinear, QuantReLU
-from brevitas.quant_tensor import QuantTensor
 from qonnx.core.datatype import DataType
 from qonnx.core.modelwrapper import ModelWrapper
 from qonnx.util.cleanup import cleanup as qonnx_cleanup

@@ -55,13 +54,13 @@
 build_dir = os.environ["FINN_BUILD_DIR"]


-def get_checkpoint_name(step, QONNX_export):
+def get_checkpoint_name(step):
     if step == "build":
         # checkpoint for build step is an entire dir
-        return build_dir + "/end2end_cybsecmlp_build_QONNX-%d" % (QONNX_export)
+        return build_dir + "/end2end_cybsecmlp_build"
     else:
         # other checkpoints are onnx files
-        return build_dir + "/end2end_cybsecmlp_QONNX-%d_%s.onnx" % (QONNX_export, step)
+        return build_dir + "/end2end_cybsecmlp_%s.onnx" % step


 class CybSecMLPForExport(nn.Module):

@@ -82,9 +81,8 @@ def forward(self, x):
         return out_final


-@pytest.mark.parametrize("QONNX_export", [False, True])
 @pytest.mark.end2end
-def test_end2end_cybsec_mlp_export(QONNX_export):
+def test_end2end_cybsec_mlp_export():
     assets_dir = pk.resource_filename("finn.qnn-data", "cybsec-mlp/")
     # load up trained net in Brevitas
     input_size = 593

@@ -116,72 +114,45 @@ def test_end2end_cybsec_mlp_export(QONNX_export):
     W_new = np.pad(W_orig, [(0, 0), (0, 7)])
     model[0].weight.data = torch.from_numpy(W_new)
     model_for_export = CybSecMLPForExport(model)
-    export_onnx_path = get_checkpoint_name("export", QONNX_export)
+    export_onnx_path = get_checkpoint_name("export")
     input_shape = (1, 600)
-    # create a QuantTensor instance to mark the input as bipolar during export
-    input_a = np.random.randint(0, 1, size=input_shape).astype(np.float32)
-    input_a = 2 * input_a - 1
-    scale = 1.0
-    input_t = torch.from_numpy(input_a * scale)
-    input_qt = QuantTensor(
-        input_t, scale=torch.tensor(scale), bit_width=torch.tensor(1.0), signed=True
-    )

-    if QONNX_export:
-        # With the onnx export from Brevitas we need to manually set
-        # the FINN DataType at the input
-        export_qonnx(model_for_export, torch.randn(input_shape), export_path=export_onnx_path)
-        model = ModelWrapper(export_onnx_path)
-        model.set_tensor_datatype(model.graph.input[0].name, DataType["BIPOLAR"])
-        model.save(export_onnx_path)
-        qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)
-        model = ModelWrapper(export_onnx_path)
-        model = model.transform(ConvertQONNXtoFINN())
-        model.save(export_onnx_path)
-    else:
-        export_finn_onnx(
-            model_for_export,
-            export_path=export_onnx_path,
-            input_t=input_qt,
-            input_names=["onnx::Mul_0"],
-        )
+    # With the onnx export from Brevitas we need to manually set
+    # the FINN DataType at the input
+    export_qonnx(model_for_export, torch.randn(input_shape), export_path=export_onnx_path)
+    model = ModelWrapper(export_onnx_path)
+    model.set_tensor_datatype(model.graph.input[0].name, DataType["BIPOLAR"])
+    model.save(export_onnx_path)
+    qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)
+    model = ModelWrapper(export_onnx_path)
+    model = model.transform(ConvertQONNXtoFINN())
     assert os.path.isfile(export_onnx_path)
-    # fix input datatype
-    finn_model = ModelWrapper(export_onnx_path)
-    finnonnx_in_tensor_name = finn_model.graph.input[0].name
-    assert tuple(finn_model.get_tensor_shape(finnonnx_in_tensor_name)) == (1, 600)
+    finnonnx_in_tensor_name = model.graph.input[0].name
+    assert tuple(model.get_tensor_shape(finnonnx_in_tensor_name)) == (1, 600)
     # verify a few exported ops
-    if QONNX_export:
-        # The first "Mul" node doesn't exist in the QONNX export,
-        # because the QuantTensor scale is not exported.
-        # However, this node would have been unity scale anyways and
-        # the models are still equivalent.
-        assert finn_model.graph.node[0].op_type == "Add"
-        assert finn_model.graph.node[1].op_type == "Div"
-        assert finn_model.graph.node[2].op_type == "MatMul"
-        assert finn_model.graph.node[-1].op_type == "MultiThreshold"
-    else:
-        assert finn_model.graph.node[0].op_type == "Mul"
-        assert finn_model.get_initializer(finn_model.graph.node[0].input[1]) == 1.0
-        assert finn_model.graph.node[1].op_type == "Add"
-        assert finn_model.graph.node[2].op_type == "Div"
-        assert finn_model.graph.node[3].op_type == "MatMul"
-        assert finn_model.graph.node[-1].op_type == "MultiThreshold"
+    # The first "Mul" node doesn't exist in the QONNX export,
+    # because the QuantTensor scale is not exported.
+    # However, this node would have been unity scale anyways and
+    # the models are still equivalent.
+    assert model.graph.node[0].op_type == "Add"
+    assert model.graph.node[1].op_type == "Div"
+    assert model.graph.node[2].op_type == "MatMul"
+    assert model.graph.node[-1].op_type == "MultiThreshold"
     # verify datatypes on some tensors
-    assert finn_model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType["BIPOLAR"]
-    first_matmul_w_name = finn_model.get_nodes_by_op_type("MatMul")[0].input[1]
-    assert finn_model.get_tensor_datatype(first_matmul_w_name) == DataType["INT2"]
+    assert model.get_tensor_datatype(finnonnx_in_tensor_name) == DataType["BIPOLAR"]
+    first_matmul_w_name = model.get_nodes_by_op_type("MatMul")[0].input[1]
+    assert model.get_tensor_datatype(first_matmul_w_name) == DataType["INT2"]
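
The export flow in this test is now single-path: export to QONNX with Brevitas, tag the model input with a FINN datatype by hand, run the QONNX cleanup, then convert into the FINN dialect. A condensed sketch of that flow using the same calls as above; "model.onnx" is a placeholder path, and model_for_export / input_shape are as constructed in the test:

    import torch
    from brevitas.export import export_qonnx
    from qonnx.core.datatype import DataType
    from qonnx.core.modelwrapper import ModelWrapper
    from qonnx.util.cleanup import cleanup as qonnx_cleanup
    from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN

    export_qonnx(model_for_export, torch.randn(input_shape), export_path="model.onnx")
    model = ModelWrapper("model.onnx")
    # the QONNX export carries no FINN datatype annotations, so set the input by hand
    model.set_tensor_datatype(model.graph.input[0].name, DataType["BIPOLAR"])
    model.save("model.onnx")
    qonnx_cleanup("model.onnx", out_file="model.onnx")
    model = ModelWrapper("model.onnx")
    model = model.transform(ConvertQONNXtoFINN())  # Quant nodes become MultiThreshold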


 @pytest.mark.slow
 @pytest.mark.vivado
 @pytest.mark.end2end
-@pytest.mark.parametrize("QONNX_export", [False, True])
-def test_end2end_cybsec_mlp_build(QONNX_export):
-    model_file = get_checkpoint_name("export", QONNX_export)
+def test_end2end_cybsec_mlp_build():
+    model_file = get_checkpoint_name("export")
     load_test_checkpoint_or_skip(model_file)
     build_env = get_build_env(build_kind, target_clk_ns)
-    output_dir = make_build_dir(f"test_end2end_cybsec_mlp_build_QONNX-{QONNX_export}")
+    output_dir = make_build_dir("test_end2end_cybsec_mlp_build")

     cfg = build.DataflowBuildConfig(
         output_dir=output_dir,

@@ -219,4 +190,5 @@ def test_end2end_cybsec_mlp_build(QONNX_export):
         est_res_dict = json.load(f)
     assert est_res_dict["total"]["LUT"] == 7904.0
     assert est_res_dict["total"]["BRAM_18K"] == 36.0
-    shutil.copytree(output_dir + "/deploy", get_checkpoint_name("build", QONNX_export))
+    shutil.copytree(output_dir + "/deploy", get_checkpoint_name("build"))
+    shutil.rmtree(get_checkpoint_name("build"))
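
The body of the DataflowBuildConfig is collapsed in this view. For orientation only, a generic configuration built on FINN's builder API might look like the sketch below; every parameter value is illustrative rather than what this test sets, and the listed output types are a typical selection:

    import finn.builder.build_dataflow as build
    import finn.builder.build_dataflow_config as build_cfg

    cfg = build.DataflowBuildConfig(
        output_dir="output_cybsec",  # illustrative output location
        target_fps=1000000,  # illustrative throughput target
        synth_clk_period_ns=10.0,  # 100 MHz clock
        board="Pynq-Z1",  # illustrative target board
        shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ,
        generate_outputs=[
            build_cfg.DataflowOutputType.ESTIMATE_REPORTS,
            build_cfg.DataflowOutputType.BITFILE,
            build_cfg.DataflowOutputType.DEPLOYMENT_PACKAGE,
        ],
    )
    build.build_dataflow_cfg("model.onnx", cfg)  # placeholder model path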

tests/fpgadataflow/test_convert_to_hls_layers_cnv.py (11 changes: 6 additions & 5 deletions)

@@ -33,7 +33,7 @@
 import numpy as np
 import os
 import torch
-from brevitas.export import export_finn_onnx
+from brevitas.export import export_qonnx
 from qonnx.core.modelwrapper import ModelWrapper
 from qonnx.custom_op.registry import getCustomOp
 from qonnx.transformation.bipolar_to_xnor import ConvertBipolarMatMulToXnorPopcount

@@ -46,13 +46,15 @@
 from qonnx.transformation.infer_data_layouts import InferDataLayouts
 from qonnx.transformation.infer_shapes import InferShapes
 from qonnx.transformation.lower_convs_to_matmul import LowerConvsToMatMul
+from qonnx.util.cleanup import cleanup as qonnx_cleanup

 import finn.core.onnx_exec as oxe
 import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls
 import finn.transformation.streamline.absorb as absorb
 from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
 from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
+from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN
 from finn.transformation.streamline import Streamline
 from finn.transformation.streamline.reorder import MakeMaxPoolNHWC
 from finn.util.test import get_test_model_trained

@@ -66,8 +68,10 @@
 @pytest.mark.parametrize("fused_activation", [True, False])
 def test_convert_to_hls_layers_cnv_w1a1(fused_activation):
     cnv = get_test_model_trained("CNV", 1, 1)
-    export_finn_onnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path_cnv)
+    export_qonnx(cnv, torch.randn(1, 3, 32, 32), export_onnx_path_cnv)
+    qonnx_cleanup(export_onnx_path_cnv, out_file=export_onnx_path_cnv)
     model = ModelWrapper(export_onnx_path_cnv)
+    model = model.transform(ConvertQONNXtoFINN())
     model = model.transform(InferShapes())
     model = model.transform(FoldConstants())
     model = model.transform(GiveUniqueNodeNames())

@@ -81,7 +85,6 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation):
     model = model.transform(ConvertBipolarMatMulToXnorPopcount())
     model = model.transform(Streamline())
     model = model.transform(InferDataLayouts())
-    # model.save("golden.onnx")
     # load one of the test vectors
     fn = pk.resource_filename("finn.qnn-data", "cifar10/cifar10-test-data-class3.npz")
     input_tensor = np.load(fn)["arr_0"].astype(np.float32)

@@ -134,11 +137,9 @@ def test_convert_to_hls_layers_cnv_w1a1(fused_activation):
     assert len(swg_nodes) == 6
     mp_nodes = model.get_nodes_by_op_type("StreamingMaxPool_Batch")
     assert len(mp_nodes) == 2
-    # model.save("cnv-pre-compile.onnx")
     model = model.transform(PrepareCppSim())
     model = model.transform(CompileCppSim())
     model = model.transform(SetExecMode("cppsim"))
-    # model.save("cnv-post-compile.onnx")
     produced_ctx = oxe.execute_onnx(model, input_dict, True)
     produced = produced_ctx[model.graph.output[0].name]
     assert np.isclose(expected, produced, atol=1e-3).all()
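
For reference, the tail of this test is FINN's standard cppsim check: PrepareCppSim emits C++ simulation code for each HLS node, CompileCppSim compiles it, and SetExecMode("cppsim") makes execute_onnx run those nodes through the compiled simulators. An annotated restatement follows; input_dict and expected come from parts of the test collapsed in this view, so their construction here is an assumption:

    model = model.transform(PrepareCppSim())  # emit C++ per HLS node
    model = model.transform(CompileCppSim())  # compile the emitted C++
    model = model.transform(SetExecMode("cppsim"))  # route execution through cppsim
    input_dict = {model.graph.input[0].name: input_tensor}  # assumed input wiring
    produced_ctx = oxe.execute_onnx(model, input_dict, True)  # True = full exec context
    produced = produced_ctx[model.graph.output[0].name]
    assert np.isclose(expected, produced, atol=1e-3).all()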

tests/fpgadataflow/test_convert_to_hls_layers_fc.py (13 changes: 10 additions & 3 deletions)

@@ -33,7 +33,7 @@
 import onnx.numpy_helper as nph
 import os
 import torch
-from brevitas.export import export_finn_onnx
+from brevitas.export import export_qonnx
 from pkgutil import get_data
 from qonnx.core.modelwrapper import ModelWrapper
 from qonnx.custom_op.registry import getCustomOp

@@ -45,13 +45,15 @@
     GiveUniqueParameterTensors,
 )
 from qonnx.transformation.infer_shapes import InferShapes
+from qonnx.util.cleanup import cleanup as qonnx_cleanup

 import finn.core.onnx_exec as oxe
 import finn.transformation.fpgadataflow.convert_to_hls_layers as to_hls
 import finn.transformation.streamline.absorb as absorb
 from finn.transformation.fpgadataflow.compile_cppsim import CompileCppSim
 from finn.transformation.fpgadataflow.prepare_cppsim import PrepareCppSim
 from finn.transformation.fpgadataflow.set_exec_mode import SetExecMode
+from finn.transformation.qonnx.convert_qonnx_to_finn import ConvertQONNXtoFINN
 from finn.transformation.streamline import Streamline
 from finn.transformation.streamline.round_thresholds import RoundAndClipThresholds
 from finn.util.test import get_test_model_trained

@@ -63,8 +65,10 @@
 @pytest.mark.vivado
 def test_convert_to_hls_layers_tfc_w1a1():
     tfc = get_test_model_trained("TFC", 1, 1)
-    export_finn_onnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path)
+    export_qonnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path)
+    qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)
     model = ModelWrapper(export_onnx_path)
+    model = model.transform(ConvertQONNXtoFINN())
     model = model.transform(InferShapes())
     model = model.transform(FoldConstants())
     model = model.transform(GiveUniqueNodeNames())

@@ -135,8 +139,11 @@ def test_convert_to_hls_layers_tfc_w1a1():
 @pytest.mark.vivado
 def test_convert_to_hls_layers_tfc_w1a2():
     tfc = get_test_model_trained("TFC", 1, 2)
-    export_finn_onnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path)
+    export_qonnx(tfc, torch.randn(1, 1, 28, 28), export_onnx_path)
+    qonnx_cleanup(export_onnx_path, out_file=export_onnx_path)
     model = ModelWrapper(export_onnx_path)
+    model = model.transform(ConvertQONNXtoFINN())
+    model.save(export_onnx_path)
     model = model.transform(InferShapes())
     model = model.transform(FoldConstants())
     model = model.transform(GiveUniqueNodeNames())

tests/transformation/test_qonnx_to_finn.py (31 changes: 4 additions & 27 deletions)

@@ -35,12 +35,9 @@
 import onnx
 import onnx.numpy_helper as nph
 import torch
-from brevitas.export import export_finn_onnx, export_qonnx
+from brevitas.export import export_qonnx
 from pkgutil import get_data
 from qonnx.core.modelwrapper import ModelWrapper
-from qonnx.transformation.fold_constants import FoldConstants
-from qonnx.transformation.general import GiveUniqueNodeNames, RemoveStaticGraphInputs
-from qonnx.transformation.infer_shapes import InferShapes
 from qonnx.util.cleanup import cleanup
 from tempfile import TemporaryDirectory

@@ -106,32 +103,12 @@ def test_QONNX_to_FINN(model_name, wbits, abits):
     brev_model, in_shape, input_tensor = get_brev_model_and_sample_inputs(model_name, wbits, abits)
     temp_dir = TemporaryDirectory()
     qonnx_base_path = temp_dir.name + "/qonnx_{}.onnx"
-    finn_base_path = temp_dir.name + "/finn_{}.onnx"

     # Get Brevitas output
     torch_input_tensor = torch.from_numpy(input_tensor).float()
     brev_output = brev_model.forward(torch_input_tensor).detach().numpy()

-    # Get "clean" FINN model and its output
-    _ = export_finn_onnx(brev_model, torch.randn(in_shape), finn_base_path.format("raw"))
-    model = ModelWrapper(finn_base_path.format("raw"))
-    model = model.transform(GiveUniqueNodeNames())
-    model = model.transform(InferShapes())
-    model = model.transform(FoldConstants())
-    model = model.transform(RemoveStaticGraphInputs())
-    model.save(finn_base_path.format("clean"))
-
-    model = ModelWrapper(finn_base_path.format("clean"))
-    input_dict = {model.graph.input[0].name: input_tensor}
-    output_dict = oxe.execute_onnx(model, input_dict, False)
-    finn_export_output = output_dict[model.graph.output[0].name]
-    # This test always fails on MobileNet for some reason
-    if model_name != "mobilenet":
-        assert np.isclose(
-            brev_output, finn_export_output, atol=ATOL
-        ).all(), "The output of the Brevitas model and the FINN model should match."
-
-    # Get the equivalent QONNX model
+    # Get QONNX model
     _ = export_qonnx(brev_model, torch.randn(in_shape), qonnx_base_path.format("raw"))
     cleanup(qonnx_base_path.format("raw"), out_file=qonnx_base_path.format("clean"))

@@ -146,7 +123,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits):
     # This test always fails on MobileNet for some reason
     if model_name != "mobilenet":
         assert np.isclose(
-            qonnx_export_output, finn_export_output, atol=ATOL
+            brev_output, qonnx_export_output, atol=ATOL
         ).all(), "The output of the FINN model and the QONNX model should match."

     # Run QONNX to FINN conversion

@@ -159,7 +136,7 @@ def test_QONNX_to_FINN(model_name, wbits, abits):
     input_dict = {model.graph.input[0].name: input_tensor}
     output_dict = oxe.execute_onnx(model, input_dict, False)
     test_output = output_dict[model.graph.output[0].name]
-    assert np.isclose(test_output, finn_export_output, atol=ATOL).all(), (
+    assert np.isclose(test_output, qonnx_export_output, atol=ATOL).all(), (
         "The output of the FINN model " "and the QONNX -> FINN converted model should match."
     )
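
After this change the test asserts a two-stage equivalence chain instead of also comparing against a separate FINN export. Restated compactly, with ATOL and all variables as defined in the test above:

    # stage 1: Brevitas forward pass vs. executed QONNX export
    assert np.isclose(brev_output, qonnx_export_output, atol=ATOL).all()
    # stage 2: executed QONNX export vs. QONNX -> FINN converted model
    assert np.isclose(qonnx_export_output, test_output, atol=ATOL).all()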

