diff --git a/e2eshark/onnx/models/adv_inception_v3_vaiq/model.py b/e2eshark/onnx/models/adv_inception_v3_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/adv_inception_v3_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_focus_l_train_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_focus_l_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_focus_l_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, 
for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_focus_l_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_focus_l_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_focus_l_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the 
tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_focus_m_train_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_focus_m_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_focus_m_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 
256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_focus_m_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_focus_m_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_focus_m_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for 
arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_l_train_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_l_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_l_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_l_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_l_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_l_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import 
onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_m_train_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_m_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_m_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The 
generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_m_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_m_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_m_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the 
image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_x_train_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_x_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_x_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] 
+E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3darknet_x_vaiq/model.py b/e2eshark/onnx/models/cs3darknet_x_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/cs3darknet_x_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3edgenet_x_train_vaiq/model.py b/e2eshark/onnx/models/cs3edgenet_x_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ 
b/e2eshark/onnx/models/cs3edgenet_x_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3edgenet_x_vaiq/model.py b/e2eshark/onnx/models/cs3edgenet_x_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/cs3edgenet_x_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an 
instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3se_edgenet_x_train_vaiq/model.py b/e2eshark/onnx/models/cs3se_edgenet_x_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/cs3se_edgenet_x_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + 
+# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3se_edgenet_x_vaiq/model.py b/e2eshark/onnx/models/cs3se_edgenet_x_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/cs3se_edgenet_x_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: 
model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3sedarknet_l_train_vaiq/model.py b/e2eshark/onnx/models/cs3sedarknet_l_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/cs3sedarknet_l_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3sedarknet_l_vaiq/model.py b/e2eshark/onnx/models/cs3sedarknet_l_vaiq/model.py new file mode 
100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/cs3sedarknet_l_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3sedarknet_x_train_vaiq/model.py b/e2eshark/onnx/models/cs3sedarknet_x_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/cs3sedarknet_x_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from 
commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/cs3sedarknet_x_vaiq/model.py b/e2eshark/onnx/models/cs3sedarknet_x_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/cs3sedarknet_x_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session 
# NOTE(review): this span of the patch adds ~20 near-identical e2eshark
# model.py scripts that differ only in the test-image size passed to
# setup_test_image() (default, 192, 256, 288, 299, 320).  The shared body is
# factored below into one parameterized script: each model directory only
# needs to set IMAGE_SIZE (or leave it None for setup_test_image's default).
import sys

import numpy
import torch
import onnxruntime

# Import from e2eshark/tools to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any rundir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Per-test copy of the shared check-definition dict.
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)

# (height, width) of the synthetic test image, or None to use
# setup_test_image()'s built-in default size.
IMAGE_SIZE = None


def run_model(image_size=IMAGE_SIZE):
    """Run ./model.onnx on a synthetic test image and record input/output.

    The generated or checked-in onnx file must always be called model.onnx;
    tools/stubs/onnxmodel.py is appended to model.py to form runmodel.py in
    the run directory, which is then taken through the flow.

    Parameters
    ----------
    image_size : tuple[int, int] | None
        (height, width) forwarded to setup_test_image(); None uses that
        helper's default size.

    Returns
    -------
    tuple[list[torch.Tensor], list[torch.Tensor]]
        ([input tensor], [per-batch output tensors]) as also stored in
        E2ESHARK_CHECK["input"] / E2ESHARK_CHECK["output"].
    """
    # Start an onnxrt session on the conventional model file name.
    session = onnxruntime.InferenceSession("model.onnx", None)

    # Even if the model is quantized, its inputs and outputs are not, so the
    # float32 test image is fed in directly.
    if image_size is None:
        img_ycbcr = setup_test_image()
    else:
        img_ycbcr = setup_test_image(*image_size)
    model_input = to_numpy(img_ycbcr)

    # The scripts being replaced all use a single input and a single output;
    # address both by their declared names rather than assuming any alias.
    inputs = session.get_inputs()
    outputs = session.get_outputs()
    model_output = session.run(
        [outputs[0].name],
        {inputs[0].name: model_input},
    )[0]

    E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input)]
    # Iterating the output array splits it per batch element, matching the
    # original scripts' list comprehension.
    E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]
    return E2ESHARK_CHECK["input"], E2ESHARK_CHECK["output"]


test_input, test_output = run_model()
print("Input:", test_input)
print("Output:", test_output)
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/dpn68_vaiq/model.py b/e2eshark/onnx/models/dpn68_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/dpn68_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/dpn68b_test_vaiq/model.py b/e2eshark/onnx/models/dpn68b_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/dpn68b_test_vaiq/model.py @@ -0,0 
+1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/dpn68b_vaiq/model.py b/e2eshark/onnx/models/dpn68b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/dpn68b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The 
generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/dpn92_vaiq/model.py b/e2eshark/onnx/models/dpn92_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/dpn92_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = 
setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/dpn98_vaiq/model.py b/e2eshark/onnx/models/dpn98_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/dpn98_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + 
+print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_nfnet_l0.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/eca_nfnet_l0.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/eca_nfnet_l0.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_nfnet_l0.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/eca_nfnet_l0.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/eca_nfnet_l0.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import 
numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_nfnet_l1.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/eca_nfnet_l1.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/eca_nfnet_l1.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test 
+E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_nfnet_l1.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/eca_nfnet_l1.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/eca_nfnet_l1.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is 
quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_nfnet_l2.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/eca_nfnet_l2.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/eca_nfnet_l2.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + 
{inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_nfnet_l2.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/eca_nfnet_l2.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..c662b18fa --- /dev/null +++ b/e2eshark/onnx/models/eca_nfnet_l2.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(384, 384) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_resnet33ts.ra2_in1k_train_vaiq/model.py 
b/e2eshark/onnx/models/eca_resnet33ts.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/eca_resnet33ts.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_resnet33ts.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/eca_resnet33ts.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/eca_resnet33ts.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is 
symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_resnext26ts.ch_in1k_train_vaiq/model.py b/e2eshark/onnx/models/eca_resnext26ts.ch_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/eca_resnext26ts.ch_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the 
tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/eca_resnext26ts.ch_in1k_vaiq/model.py b/e2eshark/onnx/models/eca_resnext26ts.ch_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/eca_resnext26ts.ch_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 
288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet101d_pruned_test_vaiq/model.py b/e2eshark/onnx/models/ecaresnet101d_pruned_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet101d_pruned_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet101d_pruned_vaiq/model.py b/e2eshark/onnx/models/ecaresnet101d_pruned_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet101d_pruned_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet101d_test_vaiq/model.py b/e2eshark/onnx/models/ecaresnet101d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet101d_test_vaiq/model.py @@ -0,0 
+1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet101d_vaiq/model.py b/e2eshark/onnx/models/ecaresnet101d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet101d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = 
dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet26t_train_vaiq/model.py b/e2eshark/onnx/models/ecaresnet26t_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet26t_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# 
not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet26t_vaiq/model.py b/e2eshark/onnx/models/ecaresnet26t_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet26t_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet50d_pruned_test_vaiq/model.py b/e2eshark/onnx/models/ecaresnet50d_pruned_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet50d_pruned_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet50d_pruned_vaiq/model.py b/e2eshark/onnx/models/ecaresnet50d_pruned_vaiq/model.py new file mode 100644 index 
000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet50d_pruned_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet50d_test_vaiq/model.py b/e2eshark/onnx/models/ecaresnet50d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet50d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import 
E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet50d_vaiq/model.py b/e2eshark/onnx/models/ecaresnet50d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet50d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet50t_train_vaiq/model.py b/e2eshark/onnx/models/ecaresnet50t_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet50t_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = 
session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnet50t_vaiq/model.py b/e2eshark/onnx/models/ecaresnet50t_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/ecaresnet50t_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnetlight_test_vaiq/model.py 
b/e2eshark/onnx/models/ecaresnetlight_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/ecaresnetlight_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ecaresnetlight_vaiq/model.py b/e2eshark/onnx/models/ecaresnetlight_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ecaresnetlight_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work 
+sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/efficientnet_b1.ft_in1k_train_vaiq/model.py b/e2eshark/onnx/models/efficientnet_b1.ft_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/efficientnet_b1.ft_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py 
in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/efficientnet_b1.ft_in1k_vaiq/model.py b/e2eshark/onnx/models/efficientnet_b1.ft_in1k_vaiq/model.py index 54fc5be6a..4febb244e 100644 --- a/e2eshark/onnx/models/efficientnet_b1.ft_in1k_vaiq/model.py +++ b/e2eshark/onnx/models/efficientnet_b1.ft_in1k_vaiq/model.py @@ -22,7 +22,7 @@ # Even if model is quantized, the inputs and outputs are # not, so apply float32 # Get and process the image -img_ycbcr = setup_test_image() +img_ycbcr = setup_test_image(256, 256) model_input_X = to_numpy(img_ycbcr) diff --git a/e2eshark/onnx/models/efficientnet_b2.ra_in1k_train_vaiq/model.py b/e2eshark/onnx/models/efficientnet_b2.ra_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/efficientnet_b2.ra_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = 
dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/efficientnet_b2.ra_in1k_vaiq/model.py b/e2eshark/onnx/models/efficientnet_b2.ra_in1k_vaiq/model.py index 4febb244e..f221670c0 100644 --- a/e2eshark/onnx/models/efficientnet_b2.ra_in1k_vaiq/model.py +++ b/e2eshark/onnx/models/efficientnet_b2.ra_in1k_vaiq/model.py @@ -22,7 +22,7 @@ # Even if model is quantized, the inputs and outputs are # not, so apply float32 # Get and process the image -img_ycbcr = setup_test_image(256, 256) +img_ycbcr = setup_test_image(288, 288) model_input_X = to_numpy(img_ycbcr) diff --git a/e2eshark/onnx/models/efficientnet_b3.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/efficientnet_b3.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/efficientnet_b3.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any 
rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/efficientnet_b3.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/efficientnet_b3.ra2_in1k_vaiq/model.py index f221670c0..153acc745 100644 --- a/e2eshark/onnx/models/efficientnet_b3.ra2_in1k_vaiq/model.py +++ b/e2eshark/onnx/models/efficientnet_b3.ra2_in1k_vaiq/model.py @@ -22,7 +22,7 @@ # Even if model is quantized, the inputs and outputs are # not, so apply float32 # Get and process the image -img_ycbcr = setup_test_image(288, 288) +img_ycbcr = setup_test_image(320, 320) model_input_X = to_numpy(img_ycbcr) diff --git a/e2eshark/onnx/models/efficientnet_b4.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/efficientnet_b4.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/efficientnet_b4.ra2_in1k_train_vaiq/model.py @@ -0,0 
+1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/efficientnet_b4.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/efficientnet_b4.ra2_in1k_vaiq/model.py index 153acc745..c662b18fa 100644 --- a/e2eshark/onnx/models/efficientnet_b4.ra2_in1k_vaiq/model.py +++ b/e2eshark/onnx/models/efficientnet_b4.ra2_in1k_vaiq/model.py @@ -22,7 +22,7 @@ # Even if model is quantized, the inputs and outputs are # not, so apply float32 # Get and process the image -img_ycbcr = setup_test_image(320, 320) +img_ycbcr = setup_test_image(384, 384) model_input_X = to_numpy(img_ycbcr) diff --git a/e2eshark/onnx/models/efficientnetv2_rw_m.agc_in1k_train_vaiq/model.py 
b/e2eshark/onnx/models/efficientnetv2_rw_m.agc_in1k_train_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/efficientnetv2_rw_m.agc_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/efficientnetv2_rw_m.agc_in1k_vaiq/model.py b/e2eshark/onnx/models/efficientnetv2_rw_m.agc_in1k_vaiq/model.py index 153acc745..cedaaf7ab 100644 --- a/e2eshark/onnx/models/efficientnetv2_rw_m.agc_in1k_vaiq/model.py +++ b/e2eshark/onnx/models/efficientnetv2_rw_m.agc_in1k_vaiq/model.py @@ -22,7 +22,7 @@ # Even if model is quantized, the inputs and outputs are # not, so apply float32 # Get 
and process the image -img_ycbcr = setup_test_image(320, 320) +img_ycbcr = setup_test_image(416, 416) model_input_X = to_numpy(img_ycbcr) diff --git a/e2eshark/onnx/models/efficientnetv2_rw_s.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/efficientnetv2_rw_s.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/efficientnetv2_rw_s.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/efficientnetv2_rw_s.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/efficientnetv2_rw_s.ra2_in1k_vaiq/model.py index f221670c0..c662b18fa 100644 --- 
a/e2eshark/onnx/models/efficientnetv2_rw_s.ra2_in1k_vaiq/model.py +++ b/e2eshark/onnx/models/efficientnetv2_rw_s.ra2_in1k_vaiq/model.py @@ -22,7 +22,7 @@ # Even if model is quantized, the inputs and outputs are # not, so apply float32 # Get and process the image -img_ycbcr = setup_test_image(288, 288) +img_ycbcr = setup_test_image(384, 384) model_input_X = to_numpy(img_ycbcr) diff --git a/e2eshark/onnx/models/efficientnetv2_rw_t.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/efficientnetv2_rw_t.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/efficientnetv2_rw_t.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) 
+print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ens_adv_inception_resnet_v2_vaiq/model.py b/e2eshark/onnx/models/ens_adv_inception_resnet_v2_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/ens_adv_inception_resnet_v2_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ese_vovnet19b_dw_test_vaiq/model.py b/e2eshark/onnx/models/ese_vovnet19b_dw_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/ese_vovnet19b_dw_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import 
onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ese_vovnet19b_dw_vaiq/model.py b/e2eshark/onnx/models/ese_vovnet19b_dw_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ese_vovnet19b_dw_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or 
checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ese_vovnet39b_test_vaiq/model.py b/e2eshark/onnx/models/ese_vovnet39b_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/ese_vovnet39b_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image 
+img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ese_vovnet39b_vaiq/model.py b/e2eshark/onnx/models/ese_vovnet39b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ese_vovnet39b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/fbnetv3_b.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/fbnetv3_b.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/fbnetv3_b.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/fbnetv3_b.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/fbnetv3_b.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ 
b/e2eshark/onnx/models/fbnetv3_b.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/fbnetv3_d.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/fbnetv3_d.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/fbnetv3_d.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, 
to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/fbnetv3_d.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/fbnetv3_d.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/fbnetv3_d.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/fbnetv3_g.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/fbnetv3_g.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..e6974428d --- /dev/null +++ b/e2eshark/onnx/models/fbnetv3_g.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(240, 240) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = 
session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/fbnetv3_g.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/fbnetv3_g.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/fbnetv3_g.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/gc_efficientnetv2_rw_t.agc_in1k_train_vaiq/model.py b/e2eshark/onnx/models/gc_efficientnetv2_rw_t.agc_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gc_efficientnetv2_rw_t.agc_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gc_efficientnetv2_rw_t.agc_in1k_vaiq/model.py b/e2eshark/onnx/models/gc_efficientnetv2_rw_t.agc_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/gc_efficientnetv2_rw_t.agc_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + 
+# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gcresnet33ts.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/gcresnet33ts.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/gcresnet33ts.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + 
+# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gcresnet33ts.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/gcresnet33ts.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/gcresnet33ts.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply 
float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gcresnet50t.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/gcresnet50t.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/gcresnet50t.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] 
+E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gcresnet50t.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/gcresnet50t.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/gcresnet50t.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gcresnext26ts.ch_in1k_train_vaiq/model.py b/e2eshark/onnx/models/gcresnext26ts.ch_in1k_train_vaiq/model.py new file mode 
100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/gcresnext26ts.ch_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gcresnext26ts.ch_in1k_vaiq/model.py b/e2eshark/onnx/models/gcresnext26ts.ch_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/gcresnext26ts.ch_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") 
+from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gcresnext50ts.ch_in1k_train_vaiq/model.py b/e2eshark/onnx/models/gcresnext50ts.ch_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/gcresnext50ts.ch_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# 
through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gcresnext50ts.ch_in1k_vaiq/model.py b/e2eshark/onnx/models/gcresnext50ts.ch_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/gcresnext50ts.ch_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# 
gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ghostnet_100_vaiq/model.py b/e2eshark/onnx/models/ghostnet_100_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ghostnet_100_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/gluon_inception_v3_vaiq/model.py b/e2eshark/onnx/models/gluon_inception_v3_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/gluon_inception_v3_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet101_v1b_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet101_v1b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet101_v1b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# 
run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet101_v1c_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet101_v1c_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet101_v1c_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is 
appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet101_v1d_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet101_v1d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet101_v1d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in 
inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet101_v1s_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet101_v1s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet101_v1s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", 
E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet152_v1b_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet152_v1b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet152_v1b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet152_v1c_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet152_v1c_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet152_v1c_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import 
from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet152_v1d_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet152_v1d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet152_v1d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file 
must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet152_v1s_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet152_v1s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet152_v1s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = 
setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet18_v1b_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet18_v1b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet18_v1b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet34_v1b_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet34_v1b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet34_v1b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet50_v1b_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet50_v1b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet50_v1b_vaiq/model.py @@ -0,0 +1,43 
@@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet50_v1c_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet50_v1c_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet50_v1c_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = 
dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet50_v1d_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet50_v1d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet50_v1d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# 
not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnet50_v1s_vaiq/model.py b/e2eshark/onnx/models/gluon_resnet50_v1s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnet50_v1s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnext101_32x4d_vaiq/model.py b/e2eshark/onnx/models/gluon_resnext101_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnext101_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnext101_64x4d_vaiq/model.py b/e2eshark/onnx/models/gluon_resnext101_64x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- 
/dev/null +++ b/e2eshark/onnx/models/gluon_resnext101_64x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_resnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/gluon_resnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_resnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, 
to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_senet154_vaiq/model.py b/e2eshark/onnx/models/gluon_senet154_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_senet154_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_seresnext101_32x4d_vaiq/model.py b/e2eshark/onnx/models/gluon_seresnext101_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_seresnext101_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + 
+model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/gluon_seresnext101_64x4d_vaiq/model.py b/e2eshark/onnx/models/gluon_seresnext101_64x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_seresnext101_64x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/gluon_seresnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/gluon_seresnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/gluon_seresnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hardcorenas_a_vaiq/model.py b/e2eshark/onnx/models/hardcorenas_a_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hardcorenas_a_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, 
commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hardcorenas_b_vaiq/model.py b/e2eshark/onnx/models/hardcorenas_b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hardcorenas_b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form 
runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hardcorenas_c_vaiq/model.py b/e2eshark/onnx/models/hardcorenas_c_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hardcorenas_c_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = 
session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hardcorenas_d_vaiq/model.py b/e2eshark/onnx/models/hardcorenas_d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hardcorenas_d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/hardcorenas_e_vaiq/model.py b/e2eshark/onnx/models/hardcorenas_e_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hardcorenas_e_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hardcorenas_f_vaiq/model.py b/e2eshark/onnx/models/hardcorenas_f_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hardcorenas_f_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to 
allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hrnet_w18_small_v2_vaiq/model.py b/e2eshark/onnx/models/hrnet_w18_small_v2_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w18_small_v2_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the 
rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hrnet_w18_small_vaiq/model.py b/e2eshark/onnx/models/hrnet_w18_small_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w18_small_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# 
gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hrnet_w18_vaiq/model.py b/e2eshark/onnx/models/hrnet_w18_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w18_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/hrnet_w30_vaiq/model.py b/e2eshark/onnx/models/hrnet_w30_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w30_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hrnet_w32_vaiq/model.py b/e2eshark/onnx/models/hrnet_w32_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w32_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to 
work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hrnet_w40_vaiq/model.py b/e2eshark/onnx/models/hrnet_w40_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w40_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow 
+ + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hrnet_w44_vaiq/model.py b/e2eshark/onnx/models/hrnet_w44_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w44_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + 
+model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hrnet_w48_vaiq/model.py b/e2eshark/onnx/models/hrnet_w48_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w48_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/hrnet_w64_vaiq/model.py b/e2eshark/onnx/models/hrnet_w64_vaiq/model.py 
new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/hrnet_w64_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ig_resnext101_32x16d_vaiq/model.py b/e2eshark/onnx/models/ig_resnext101_32x16d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ig_resnext101_32x16d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from 
commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ig_resnext101_32x32d_vaiq/model.py b/e2eshark/onnx/models/ig_resnext101_32x32d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ig_resnext101_32x32d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt 
session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ig_resnext101_32x8d_vaiq/model.py b/e2eshark/onnx/models/ig_resnext101_32x8d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ig_resnext101_32x8d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + 
+model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/inception_resnet_v2_vaiq/model.py b/e2eshark/onnx/models/inception_resnet_v2_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/inception_resnet_v2_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/inception_v3_vaiq/model.py 
b/e2eshark/onnx/models/inception_v3_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/inception_v3_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/inception_v4_vaiq/model.py b/e2eshark/onnx/models/inception_v4_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/inception_v4_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, 
"../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/legacy_senet154_vaiq/model.py b/e2eshark/onnx/models/legacy_senet154_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_senet154_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow 
+ + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/legacy_seresnet101_vaiq/model.py b/e2eshark/onnx/models/legacy_seresnet101_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_seresnet101_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = 
session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/legacy_seresnet152_vaiq/model.py b/e2eshark/onnx/models/legacy_seresnet152_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_seresnet152_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/legacy_seresnet18_vaiq/model.py b/e2eshark/onnx/models/legacy_seresnet18_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_seresnet18_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/legacy_seresnet34_vaiq/model.py b/e2eshark/onnx/models/legacy_seresnet34_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_seresnet34_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils 
is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/legacy_seresnet50_vaiq/model.py b/e2eshark/onnx/models/legacy_seresnet50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_seresnet50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to 
form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/legacy_seresnext101_32x4d_vaiq/model.py b/e2eshark/onnx/models/legacy_seresnext101_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_seresnext101_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] 
and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/legacy_seresnext26_32x4d_vaiq/model.py b/e2eshark/onnx/models/legacy_seresnext26_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_seresnext26_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", 
E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/legacy_seresnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/legacy_seresnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/legacy_seresnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/mixnet_l.ft_in1k_vaiq/model.py b/e2eshark/onnx/models/mixnet_l.ft_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/mixnet_l.ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# 
import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/mixnet_m.ft_in1k_vaiq/model.py b/e2eshark/onnx/models/mixnet_m.ft_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/mixnet_m.ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must 
always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/mixnet_s.ft_in1k_vaiq/model.py b/e2eshark/onnx/models/mixnet_s.ft_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/mixnet_s.ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() 
+ +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/mixnet_xl.ra_in1k_vaiq/model.py b/e2eshark/onnx/models/mixnet_xl.ra_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/mixnet_xl.ra_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in 
model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/mobilenetv3_large_100.miil_in21k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/mobilenetv3_large_100.miil_in21k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/mobilenetv3_large_100.miil_in21k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/mobilenetv3_rw.rmsp_in1k_vaiq/model.py b/e2eshark/onnx/models/mobilenetv3_rw.rmsp_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/mobilenetv3_rw.rmsp_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/nf_regnet_b1.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/nf_regnet_b1.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/nf_regnet_b1.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, 
to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/nf_regnet_b1.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/nf_regnet_b1.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/nf_regnet_b1.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/nf_resnet50.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/nf_resnet50.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/nf_resnet50.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = 
session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/nf_resnet50.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/nf_resnet50.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/nf_resnet50.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/nfnet_l0.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/nfnet_l0.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/nfnet_l0.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/nfnet_l0.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/nfnet_l0.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/nfnet_l0.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# 
run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetv_040.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetv_040.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetv_040.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the 
tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetv_040.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetv_040.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnetv_040.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + 
+model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetv_064.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetv_064.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetv_064.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetv_064.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetv_064.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnetv_064.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_002.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_002.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/regnetx_002.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_004.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_004.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..b0bf3339e --- /dev/null +++ b/e2eshark/onnx/models/regnetx_004.pycls_in1k_vaiq/model.py @@ -0,0 +1,49 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, 
setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) + +# Post process output to do: +E2ESHARK_CHECK["postprocess"] = [ + (torch.nn.functional.softmax, [0], False, 0), + (torch.topk, [1], True, 1), +] diff --git a/e2eshark/onnx/models/regnetx_004_tv.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_004_tv.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_004_tv.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to 
model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_006.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_006.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_006.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in 
inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_008.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_008.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_008.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", 
E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_008.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_008.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_008.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_016.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_016.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_016.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime 
+ +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_016.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_016.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_016.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked 
in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_032.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_032.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_032.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the 
image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_032.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_032.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_032.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] 
+E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_040.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_040.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_040.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_064.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_064.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/regnetx_064.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_080.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_080.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_080.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, 
setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_080.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_080.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_080.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_120.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_120.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_120.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + 
+model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_160.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_160.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_160.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/regnetx_160.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_160.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..b0bf3339e --- /dev/null +++ b/e2eshark/onnx/models/regnetx_160.tv2_in1k_vaiq/model.py @@ -0,0 +1,49 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) + +# Post process output to do: +E2ESHARK_CHECK["postprocess"] = [ + (torch.nn.functional.softmax, [0], False, 0), + (torch.topk, [1], True, 1), +] diff --git a/e2eshark/onnx/models/regnetx_320.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_320.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/regnetx_320.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetx_320.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetx_320.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetx_320.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, 
setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_002.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_002.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_002.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_004.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_004.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_004.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + 
+model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_004.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_004.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_004.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_006.pycls_in1k_vaiq/model.py 
b/e2eshark/onnx/models/regnety_006.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_006.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_008.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_008.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_008.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow 
any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_008_tv.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_008_tv.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_008_tv.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py 
in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_016.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_016.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_016.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] 
+inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_016.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_016.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_016.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", 
E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_032.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_032.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_032.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_032.ra_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnety_032.ra_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_032.ra_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from 
e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_032.ra_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_032.ra_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnety_032.ra_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must 
always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_032.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_032.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_032.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = 
setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_040.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_040.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_040.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_040.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnety_040.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_040.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_040.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_040.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ 
b/e2eshark/onnx/models/regnety_040.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_064.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_064.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_064.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, 
setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_064.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnety_064.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_064.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_064.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_064.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnety_064.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + 
+model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_080.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_080.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_080.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/regnety_080.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnety_080.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_080.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_080.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_080.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnety_080.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, 
for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_080_tv.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_080_tv.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_080_tv.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# 
the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_120.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_120.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_120.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + 
+model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_120.sw_in12k_ft_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnety_120.sw_in12k_ft_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_120.sw_in12k_ft_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] 
+E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_120.sw_in12k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_120.sw_in12k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnety_120.sw_in12k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_160.lion_in12k_ft_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnety_160.lion_in12k_ft_in1k_train_vaiq/model.py new file mode 100644 index 
000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_160.lion_in12k_ft_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_160.lion_in12k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_160.lion_in12k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnety_160.lion_in12k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, 
"../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_160.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_160.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_160.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then 
taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_160.sw_in12k_ft_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnety_160.sw_in12k_ft_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_160.sw_in12k_ft_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] 
+inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_160.sw_in12k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_160.sw_in12k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnety_160.sw_in12k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", 
E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_160.swag_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_160.swag_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..c662b18fa --- /dev/null +++ b/e2eshark/onnx/models/regnety_160.swag_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(384, 384) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_160.swag_lc_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_160.swag_lc_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_160.swag_lc_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, 
torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_160.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_160.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_160.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + 
+ +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_320.pycls_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_320.pycls_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_320.pycls_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply 
float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_320.seer_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_320.seer_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..c662b18fa --- /dev/null +++ b/e2eshark/onnx/models/regnety_320.seer_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(384, 384) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_320.swag_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_320.swag_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..c662b18fa --- /dev/null +++ b/e2eshark/onnx/models/regnety_320.swag_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(384, 384) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_320.swag_lc_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_320.swag_lc_in1k_vaiq/model.py new file mode 100644 index 
000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_320.swag_lc_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_320.tv2_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_320.tv2_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnety_320.tv2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import 
E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnety_640.seer_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/regnety_640.seer_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..c662b18fa --- /dev/null +++ b/e2eshark/onnx/models/regnety_640.seer_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt 
session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(384, 384) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_040.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_040.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/regnetz_040.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] 
+outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_040.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_040.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_040.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/regnetz_040_h.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_040_h.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/regnetz_040_h.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_040_h.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_040_h.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_040_h.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow 
running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_b16.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_b16.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/regnetz_b16.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file 
must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_b16.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_b16.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_b16.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = 
setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_c16.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_c16.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/regnetz_c16.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] 
+E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_c16.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_c16.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_c16.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_c16_evos.ch_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_c16_evos.ch_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ 
b/e2eshark/onnx/models/regnetz_c16_evos.ch_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_c16_evos.ch_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_c16_evos.ch_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_c16_evos.ch_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import 
E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_d32.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_d32.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/regnetz_d32.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an 
onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_d32.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_d32.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_d32.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = 
session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_d8.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_d8.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/regnetz_d8.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/regnetz_d8.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_d8.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_d8.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_d8_evos.ch_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_d8_evos.ch_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/regnetz_d8_evos.ch_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in 
current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_d8_evos.ch_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_d8_evos.ch_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_d8_evos.ch_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called 
model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_e8.ra3_in1k_train_vaiq/model.py b/e2eshark/onnx/models/regnetz_e8.ra3_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/regnetz_e8.ra3_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = 
setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/regnetz_e8.ra3_in1k_vaiq/model.py b/e2eshark/onnx/models/regnetz_e8.ra3_in1k_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/regnetz_e8.ra3_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/repvgg_a2.rvgg_in1k_vaiq/model.py b/e2eshark/onnx/models/repvgg_a2.rvgg_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/repvgg_a2.rvgg_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/repvgg_b0.rvgg_in1k_vaiq/model.py b/e2eshark/onnx/models/repvgg_b0.rvgg_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/repvgg_b0.rvgg_in1k_vaiq/model.py @@ -0,0 
+1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/repvgg_b1.rvgg_in1k_vaiq/model.py b/e2eshark/onnx/models/repvgg_b1.rvgg_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/repvgg_b1.rvgg_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = 
dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/repvgg_b1g4.rvgg_in1k_vaiq/model.py b/e2eshark/onnx/models/repvgg_b1g4.rvgg_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/repvgg_b1g4.rvgg_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs 
are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/repvgg_b2.rvgg_in1k_vaiq/model.py b/e2eshark/onnx/models/repvgg_b2.rvgg_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/repvgg_b2.rvgg_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/repvgg_b2g4.rvgg_in1k_vaiq/model.py b/e2eshark/onnx/models/repvgg_b2g4.rvgg_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/repvgg_b2g4.rvgg_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/repvgg_b3.rvgg_in1k_vaiq/model.py b/e2eshark/onnx/models/repvgg_b3.rvgg_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null 
+++ b/e2eshark/onnx/models/repvgg_b3.rvgg_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/repvgg_b3g4.rvgg_in1k_vaiq/model.py b/e2eshark/onnx/models/repvgg_b3g4.rvgg_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/repvgg_b3g4.rvgg_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, 
setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/res2net101_26w_4s_vaiq/model.py b/e2eshark/onnx/models/res2net101_26w_4s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/res2net101_26w_4s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/res2net50_14w_8s_vaiq/model.py b/e2eshark/onnx/models/res2net50_14w_8s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/res2net50_14w_8s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + 
[outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/res2net50_26w_4s_vaiq/model.py b/e2eshark/onnx/models/res2net50_26w_4s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/res2net50_26w_4s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/res2net50_26w_6s_vaiq/model.py 
b/e2eshark/onnx/models/res2net50_26w_6s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/res2net50_26w_6s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/res2net50_26w_8s_vaiq/model.py b/e2eshark/onnx/models/res2net50_26w_8s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/res2net50_26w_8s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work 
+sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/res2net50_48w_2s_vaiq/model.py b/e2eshark/onnx/models/res2net50_48w_2s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/res2net50_48w_2s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken 
+# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/res2next50_vaiq/model.py b/e2eshark/onnx/models/res2next50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/res2next50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = 
session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resmlp_12_224.fb_distilled_in1k_vaiq/model.py b/e2eshark/onnx/models/resmlp_12_224.fb_distilled_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resmlp_12_224.fb_distilled_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/resmlp_12_224.fb_in1k_vaiq/model.py b/e2eshark/onnx/models/resmlp_12_224.fb_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resmlp_12_224.fb_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resmlp_24_224.fb_distilled_in1k_vaiq/model.py b/e2eshark/onnx/models/resmlp_24_224.fb_distilled_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resmlp_24_224.fb_distilled_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running 
in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resmlp_24_224.fb_in1k_vaiq/model.py b/e2eshark/onnx/models/resmlp_24_224.fb_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resmlp_24_224.fb_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx 
+# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resmlp_36_224.fb_distilled_in1k_vaiq/model.py b/e2eshark/onnx/models/resmlp_36_224.fb_distilled_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resmlp_36_224.fb_distilled_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = 
setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resmlp_36_224.fb_in1k_vaiq/model.py b/e2eshark/onnx/models/resmlp_36_224.fb_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resmlp_36_224.fb_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resmlp_big_24_224.fb_distilled_in1k_vaiq/model.py b/e2eshark/onnx/models/resmlp_big_24_224.fb_distilled_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resmlp_big_24_224.fb_distilled_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnest101e_vaiq/model.py b/e2eshark/onnx/models/resnest101e_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ 
b/e2eshark/onnx/models/resnest101e_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnest14d_vaiq/model.py b/e2eshark/onnx/models/resnest14d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnest14d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it 
for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnest26d_vaiq/model.py b/e2eshark/onnx/models/resnest26d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnest26d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs 
are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnest50d_1s4x24d_vaiq/model.py b/e2eshark/onnx/models/resnest50d_1s4x24d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnest50d_1s4x24d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnest50d_4s2x40d_vaiq/model.py b/e2eshark/onnx/models/resnest50d_4s2x40d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnest50d_4s2x40d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnest50d_vaiq/model.py b/e2eshark/onnx/models/resnest50d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/resnest50d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet101_test_vaiq/model.py b/e2eshark/onnx/models/resnet101_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet101_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of 
it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet101_vaiq/model.py b/e2eshark/onnx/models/resnet101_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet101_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and 
outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet101d_train_vaiq/model.py b/e2eshark/onnx/models/resnet101d_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnet101d_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet101d_vaiq/model.py b/e2eshark/onnx/models/resnet101d_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/resnet101d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet10t_train_vaiq/model.py b/e2eshark/onnx/models/resnet10t_train_vaiq/model.py new file mode 100644 index 000000000..fa8178ff9 --- /dev/null +++ 
b/e2eshark/onnx/models/resnet10t_train_vaiq/model.py @@ -0,0 +1,44 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(176, 176) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) + diff --git a/e2eshark/onnx/models/resnet10t_vaiq/model.py b/e2eshark/onnx/models/resnet10t_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet10t_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of 
it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet14t_train_vaiq/model.py b/e2eshark/onnx/models/resnet14t_train_vaiq/model.py new file mode 100644 index 000000000..565f77691 --- /dev/null +++ b/e2eshark/onnx/models/resnet14t_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the 
inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(176, 176) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet14t_vaiq/model.py b/e2eshark/onnx/models/resnet14t_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet14t_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet152_test_vaiq/model.py b/e2eshark/onnx/models/resnet152_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet152_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet152_vaiq/model.py b/e2eshark/onnx/models/resnet152_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/resnet152_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet152d_train_vaiq/model.py b/e2eshark/onnx/models/resnet152d_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnet152d_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an 
instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet152d_vaiq/model.py b/e2eshark/onnx/models/resnet152d_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/resnet152d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, 
the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet18_test_vaiq/model.py b/e2eshark/onnx/models/resnet18_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet18_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] 
+E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet18_vaiq/model.py b/e2eshark/onnx/models/resnet18_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet18_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet18d_test_vaiq/model.py b/e2eshark/onnx/models/resnet18d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ 
b/e2eshark/onnx/models/resnet18d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet18d_vaiq/model.py b/e2eshark/onnx/models/resnet18d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet18d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it 
for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet200d_train_vaiq/model.py b/e2eshark/onnx/models/resnet200d_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnet200d_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the 
inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet200d_vaiq/model.py b/e2eshark/onnx/models/resnet200d_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/resnet200d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet26_test_vaiq/model.py b/e2eshark/onnx/models/resnet26_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet26_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet26_vaiq/model.py b/e2eshark/onnx/models/resnet26_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/resnet26_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet26d_test_vaiq/model.py b/e2eshark/onnx/models/resnet26d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet26d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of 
it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet26d_vaiq/model.py b/e2eshark/onnx/models/resnet26d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet26d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and 
outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet26t_test_vaiq/model.py b/e2eshark/onnx/models/resnet26t_test_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/resnet26t_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet26t_vaiq/model.py b/e2eshark/onnx/models/resnet26t_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnet26t_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet32ts.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnet32ts.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ 
b/e2eshark/onnx/models/resnet32ts.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet32ts.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/resnet32ts.ra2_in1k_vaiq/model.py index 4febb244e..f221670c0 100644 --- a/e2eshark/onnx/models/resnet32ts.ra2_in1k_vaiq/model.py +++ b/e2eshark/onnx/models/resnet32ts.ra2_in1k_vaiq/model.py @@ -22,7 +22,7 @@ # Even if model is quantized, the inputs and outputs are # not, so apply float32 # Get and process the image -img_ycbcr = setup_test_image(256, 256) +img_ycbcr = setup_test_image(288, 288) model_input_X = to_numpy(img_ycbcr) diff --git 
a/e2eshark/onnx/models/resnet33ts.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnet33ts.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnet33ts.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet33ts.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/resnet33ts.ra2_in1k_vaiq/model.py index 4febb244e..f221670c0 100644 --- a/e2eshark/onnx/models/resnet33ts.ra2_in1k_vaiq/model.py +++ b/e2eshark/onnx/models/resnet33ts.ra2_in1k_vaiq/model.py @@ -22,7 +22,7 @@ # Even if model is quantized, the inputs and outputs are # not, so apply 
float32 # Get and process the image -img_ycbcr = setup_test_image(256, 256) +img_ycbcr = setup_test_image(288, 288) model_input_X = to_numpy(img_ycbcr) diff --git a/e2eshark/onnx/models/resnet34_test_vaiq/model.py b/e2eshark/onnx/models/resnet34_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet34_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet34_vaiq/model.py b/e2eshark/onnx/models/resnet34_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet34_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, 
torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet34d_test_vaiq/model.py b/e2eshark/onnx/models/resnet34d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet34d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The 
generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet34d_vaiq/model.py b/e2eshark/onnx/models/resnet34d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet34d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image 
+img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet50_gn_test_vaiq/model.py b/e2eshark/onnx/models/resnet50_gn_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet50_gn_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet50_gn_vaiq/model.py b/e2eshark/onnx/models/resnet50_gn_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet50_gn_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet50_test_vaiq/model.py b/e2eshark/onnx/models/resnet50_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet50_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys 
+import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet50_vaiq/model.py b/e2eshark/onnx/models/resnet50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx 
file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet50d_test_vaiq/model.py b/e2eshark/onnx/models/resnet50d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet50d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = 
setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet50d_vaiq/model.py b/e2eshark/onnx/models/resnet50d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnet50d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in 
model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet51q.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnet51q.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnet51q.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet51q.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/resnet51q.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet51q.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ 
+import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet61q.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnet61q.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnet61q.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test 
+E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnet61q.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/resnet61q.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnet61q.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the 
inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetaa50_train_vaiq/model.py b/e2eshark/onnx/models/resnetaa50_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetaa50_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] 
+E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetaa50_vaiq/model.py b/e2eshark/onnx/models/resnetaa50_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnetaa50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetblur50_test_vaiq/model.py b/e2eshark/onnx/models/resnetblur50_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null 
+++ b/e2eshark/onnx/models/resnetblur50_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetblur50_vaiq/model.py b/e2eshark/onnx/models/resnetblur50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetblur50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create 
an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetrs101_train_vaiq/model.py b/e2eshark/onnx/models/resnetrs101_train_vaiq/model.py new file mode 100644 index 000000000..aefb3a075 --- /dev/null +++ b/e2eshark/onnx/models/resnetrs101_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model 
is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(192, 192) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetrs101_vaiq/model.py b/e2eshark/onnx/models/resnetrs101_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnetrs101_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] 
+E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetrs152_train_vaiq/model.py b/e2eshark/onnx/models/resnetrs152_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnetrs152_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetrs152_vaiq/model.py b/e2eshark/onnx/models/resnetrs152_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- 
/dev/null +++ b/e2eshark/onnx/models/resnetrs152_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetrs200_train_vaiq/model.py b/e2eshark/onnx/models/resnetrs200_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnetrs200_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, 
setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetrs200_vaiq/model.py b/e2eshark/onnx/models/resnetrs200_vaiq/model.py new file mode 100644 index 000000000..c6f07808a --- /dev/null +++ b/e2eshark/onnx/models/resnetrs200_vaiq/model.py @@ -0,0 +1,44 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) 
+ +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) + diff --git a/e2eshark/onnx/models/resnetrs50_train_vaiq/model.py b/e2eshark/onnx/models/resnetrs50_train_vaiq/model.py new file mode 100644 index 000000000..fd0c19599 --- /dev/null +++ b/e2eshark/onnx/models/resnetrs50_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(160, 160) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + 
{inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetrs50_vaiq/model.py b/e2eshark/onnx/models/resnetrs50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetrs50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_101.a1h_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnetv2_101.a1h_in1k_train_vaiq/model.py new file mode 
100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_101.a1h_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_101.a1h_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_101.a1h_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_101.a1h_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from 
commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_101x1_bit.goog_in21k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_101x1_bit.goog_in21k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..89a208020 --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_101x1_bit.goog_in21k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory 
which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(448, 448) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_152x2_bit.goog_in21k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_152x2_bit.goog_in21k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..89a208020 --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_152x2_bit.goog_in21k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(448, 448) + +model_input_X = to_numpy(img_ycbcr) + +# gets 
X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384_vaiq/model.py b/e2eshark/onnx/models/resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384_vaiq/model.py new file mode 100644 index 000000000..c662b18fa --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_384_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(384, 384) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] 
+E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_152x2_bit.goog_teacher_in21k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50.a1h_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50.a1h_in1k_train_vaiq/model.py new file 
mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50.a1h_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50.a1h_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50.a1h_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50.a1h_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from 
commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50d_evos.ah_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50d_evos.ah_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50d_evos.ah_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then 
taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50d_evos.ah_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50d_evos.ah_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50d_evos.ah_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = 
session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50d_gn.ah_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50d_gn.ah_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50d_gn.ah_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) 
+print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50d_gn.ah_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50d_gn.ah_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50d_gn.ah_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50x1_bit.goog_distilled_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50x1_bit.goog_distilled_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50x1_bit.goog_distilled_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import 
numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50x1_bit.goog_in21k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50x1_bit.goog_in21k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..89a208020 --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50x1_bit.goog_in21k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for 
this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(448, 448) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnetv2_50x3_bit.goog_in21k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/resnetv2_50x3_bit.goog_in21k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..89a208020 --- /dev/null +++ b/e2eshark/onnx/models/resnetv2_50x3_bit.goog_in21k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = 
onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(448, 448) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext101_32x8d_vaiq/model.py b/e2eshark/onnx/models/resnext101_32x8d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnext101_32x8d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = 
session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext101_64x4d_train_vaiq/model.py b/e2eshark/onnx/models/resnext101_64x4d_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnext101_64x4d_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext101_64x4d_vaiq/model.py 
b/e2eshark/onnx/models/resnext101_64x4d_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnext101_64x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext26ts.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/resnext26ts.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/resnext26ts.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked 
to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext26ts.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/resnext26ts.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnext26ts.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form 
runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext50_32x4d_test_vaiq/model.py b/e2eshark/onnx/models/resnext50_32x4d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnext50_32x4d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and 
Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/resnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", 
E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext50d_32x4d_test_vaiq/model.py b/e2eshark/onnx/models/resnext50d_32x4d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/resnext50d_32x4d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/resnext50d_32x4d_vaiq/model.py b/e2eshark/onnx/models/resnext50d_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/resnext50d_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running 
in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnet_100.nav_in1k_vaiq/model.py b/e2eshark/onnx/models/rexnet_100.nav_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/rexnet_100.nav_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the 
tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnet_130.nav_in1k_vaiq/model.py b/e2eshark/onnx/models/rexnet_130.nav_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/rexnet_130.nav_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = 
to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnet_150.nav_in1k_vaiq/model.py b/e2eshark/onnx/models/rexnet_150.nav_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/rexnet_150.nav_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + 
+print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnet_200.nav_in1k_vaiq/model.py b/e2eshark/onnx/models/rexnet_200.nav_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/rexnet_200.nav_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnet_300.nav_in1k_vaiq/model.py b/e2eshark/onnx/models/rexnet_300.nav_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/rexnet_300.nav_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import 
onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnetr_200.sw_in12k_ft_in1k_train_vaiq/model.py b/e2eshark/onnx/models/rexnetr_200.sw_in12k_ft_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/rexnetr_200.sw_in12k_ft_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = 
dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnetr_200.sw_in12k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/rexnetr_200.sw_in12k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/rexnetr_200.sw_in12k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, 
the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnetr_300.sw_in12k_ft_in1k_train_vaiq/model.py b/e2eshark/onnx/models/rexnetr_300.sw_in12k_ft_in1k_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/rexnetr_300.sw_in12k_ft_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], 
+ {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/rexnetr_300.sw_in12k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/rexnetr_300.sw_in12k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/rexnetr_300.sw_in12k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/selecsls42b_vaiq/model.py 
b/e2eshark/onnx/models/selecsls42b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/selecsls42b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/selecsls60_vaiq/model.py b/e2eshark/onnx/models/selecsls60_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/selecsls60_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, 
"../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/selecsls60b_vaiq/model.py b/e2eshark/onnx/models/selecsls60b_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/selecsls60b_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an 
onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/semnasnet_075.rmsp_in1k_vaiq/model.py b/e2eshark/onnx/models/semnasnet_075.rmsp_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/semnasnet_075.rmsp_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = 
session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/semnasnet_100.rmsp_in1k_vaiq/model.py b/e2eshark/onnx/models/semnasnet_100.rmsp_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/semnasnet_100.rmsp_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/sequencer2d_m_vaiq/model.py b/e2eshark/onnx/models/sequencer2d_m_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/sequencer2d_m_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/sequencer2d_s_vaiq/model.py b/e2eshark/onnx/models/sequencer2d_s_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/sequencer2d_s_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to 
allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnet152d_train_vaiq/model.py b/e2eshark/onnx/models/seresnet152d_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/seresnet152d_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the 
rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnet152d_vaiq/model.py b/e2eshark/onnx/models/seresnet152d_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/seresnet152d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = 
session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnet33ts.ra2_in1k_train_vaiq/model.py b/e2eshark/onnx/models/seresnet33ts.ra2_in1k_train_vaiq/model.py new file mode 100644 index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/seresnet33ts.ra2_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) 
+print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnet33ts.ra2_in1k_vaiq/model.py b/e2eshark/onnx/models/seresnet33ts.ra2_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnet33ts.ra2_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnet50_test_vaiq/model.py b/e2eshark/onnx/models/seresnet50_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnet50_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to 
allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnet50_vaiq/model.py b/e2eshark/onnx/models/seresnet50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/seresnet50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the 
tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext101_32x8d_train_vaiq/model.py b/e2eshark/onnx/models/seresnext101_32x8d_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/seresnext101_32x8d_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + 
+model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext101_32x8d_vaiq/model.py b/e2eshark/onnx/models/seresnext101_32x8d_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnext101_32x8d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in 
model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext101d_32x8d_train_vaiq/model.py b/e2eshark/onnx/models/seresnext101d_32x8d_train_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/seresnext101d_32x8d_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext101d_32x8d_vaiq/model.py b/e2eshark/onnx/models/seresnext101d_32x8d_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnext101d_32x8d_vaiq/model.py @@ -0,0 +1,43 @@ 
+import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext26d_32x4d_test_vaiq/model.py b/e2eshark/onnx/models/seresnext26d_32x4d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnext26d_32x4d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test 
+E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext26d_32x4d_vaiq/model.py b/e2eshark/onnx/models/seresnext26d_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/seresnext26d_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the 
inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext26t_32x4d_test_vaiq/model.py b/e2eshark/onnx/models/seresnext26t_32x4d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnext26t_32x4d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, 
+)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext26t_32x4d_vaiq/model.py b/e2eshark/onnx/models/seresnext26t_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/seresnext26t_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext26ts.ch_in1k_train_vaiq/model.py b/e2eshark/onnx/models/seresnext26ts.ch_in1k_train_vaiq/model.py new file mode 100644 
index 000000000..4febb244e --- /dev/null +++ b/e2eshark/onnx/models/seresnext26ts.ch_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(256, 256) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext26ts.ch_in1k_vaiq/model.py b/e2eshark/onnx/models/seresnext26ts.ch_in1k_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnext26ts.ch_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from 
commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext50_32x4d_test_vaiq/model.py b/e2eshark/onnx/models/seresnext50_32x4d_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnext50_32x4d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# 
start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/seresnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/seresnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = 
session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/seresnextaa101d_32x8d_test_vaiq/model.py b/e2eshark/onnx/models/seresnextaa101d_32x8d_test_vaiq/model.py new file mode 100644 index 000000000..153acc745 --- /dev/null +++ b/e2eshark/onnx/models/seresnextaa101d_32x8d_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(320, 320) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git 
a/e2eshark/onnx/models/seresnextaa101d_32x8d_vaiq/model.py b/e2eshark/onnx/models/seresnextaa101d_32x8d_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/seresnextaa101d_32x8d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/skresnet18_vaiq/model.py b/e2eshark/onnx/models/skresnet18_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/skresnet18_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is 
symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/skresnet34_vaiq/model.py b/e2eshark/onnx/models/skresnet34_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/skresnet34_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the 
rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/skresnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/skresnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/skresnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = 
session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/spnasnet_100.rmsp_in1k_vaiq/model.py b/e2eshark/onnx/models/spnasnet_100.rmsp_in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/spnasnet_100.rmsp_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", 
E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ssl_resnet18_vaiq/model.py b/e2eshark/onnx/models/ssl_resnet18_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ssl_resnet18_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ssl_resnet50_vaiq/model.py b/e2eshark/onnx/models/ssl_resnet50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ssl_resnet50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, 
commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ssl_resnext101_32x16d_vaiq/model.py b/e2eshark/onnx/models/ssl_resnext101_32x16d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ssl_resnext101_32x16d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended 
to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ssl_resnext101_32x4d_vaiq/model.py b/e2eshark/onnx/models/ssl_resnext101_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ssl_resnext101_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in 
inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ssl_resnext101_32x8d_vaiq/model.py b/e2eshark/onnx/models/ssl_resnext101_32x8d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ssl_resnext101_32x8d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", 
E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/ssl_resnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/ssl_resnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/ssl_resnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/swsl_resnet18_vaiq/model.py b/e2eshark/onnx/models/swsl_resnet18_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/swsl_resnet18_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from 
e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/swsl_resnet50_vaiq/model.py b/e2eshark/onnx/models/swsl_resnet50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/swsl_resnet50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called 
model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/swsl_resnext101_32x16d_vaiq/model.py b/e2eshark/onnx/models/swsl_resnext101_32x16d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/swsl_resnext101_32x16d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() 
+ +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/swsl_resnext101_32x4d_vaiq/model.py b/e2eshark/onnx/models/swsl_resnext101_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/swsl_resnext101_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr 
in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/swsl_resnext101_32x8d_vaiq/model.py b/e2eshark/onnx/models/swsl_resnext101_32x8d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/swsl_resnext101_32x8d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/swsl_resnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/swsl_resnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/swsl_resnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import 
numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnet_el.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnet_el.in1k_vaiq/model.py new file mode 100644 index 000000000..240d45f78 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnet_el.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = 
dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(300, 300) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnet_em.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnet_em.in1k_vaiq/model.py new file mode 100644 index 000000000..e6974428d --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnet_em.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the 
inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(240, 240) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnet_es.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnet_es.in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnet_es.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, 
+)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnet_lite0.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnet_lite0.in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnet_lite0.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnet_lite1.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnet_lite1.in1k_vaiq/model.py 
new file mode 100644 index 000000000..e6974428d --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnet_lite1.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(240, 240) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnet_lite2.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnet_lite2.in1k_vaiq/model.py new file mode 100644 index 000000000..a54448264 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnet_lite2.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work 
+sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(260, 260) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnet_lite3.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnet_lite3.in1k_vaiq/model.py new file mode 100644 index 000000000..240d45f78 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnet_lite3.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in 
the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(300, 300) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnet_lite4.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnet_lite4.in1k_vaiq/model.py new file mode 100644 index 000000000..09bb075be --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnet_lite4.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(380, 380) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] 
and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b0.in1k_train_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b0.in1k_train_vaiq/model.py new file mode 100644 index 000000000..aefb3a075 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b0.in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(192, 192) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + 
+print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b0.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b0.in1k_vaiq/model.py new file mode 100644 index 000000000..b0bf3339e --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b0.in1k_vaiq/model.py @@ -0,0 +1,49 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) + +# Post process output to do: +E2ESHARK_CHECK["postprocess"] = [ + (torch.nn.functional.softmax, [0], False, 0), + (torch.topk, [1], True, 1), +] diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b1.in1k_train_vaiq/model.py 
b/e2eshark/onnx/models/tf_efficientnetv2_b1.in1k_train_vaiq/model.py new file mode 100644 index 000000000..aefb3a075 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b1.in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(192, 192) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b1.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b1.in1k_vaiq/model.py new file mode 100644 index 000000000..e6974428d --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b1.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, 
commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(240, 240) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b2.in1k_train_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b2.in1k_train_vaiq/model.py new file mode 100644 index 000000000..51f0548a9 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b2.in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# 
the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(208, 208) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b2.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b2.in1k_vaiq/model.py new file mode 100644 index 000000000..a54448264 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b2.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = 
setup_test_image(260, 260) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b3.in1k_train_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b3.in1k_train_vaiq/model.py new file mode 100644 index 000000000..e6974428d --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b3.in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(240, 240) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = 
[torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b3.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b3.in1k_vaiq/model.py new file mode 100644 index 000000000..240d45f78 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b3.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(300, 300) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b3.in21k_ft_in1k_train_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b3.in21k_ft_in1k_train_vaiq/model.py 
new file mode 100644 index 000000000..e6974428d --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b3.in21k_ft_in1k_train_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(240, 240) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_efficientnetv2_b3.in21k_ft_in1k_vaiq/model.py b/e2eshark/onnx/models/tf_efficientnetv2_b3.in21k_ft_in1k_vaiq/model.py new file mode 100644 index 000000000..240d45f78 --- /dev/null +++ b/e2eshark/onnx/models/tf_efficientnetv2_b3.in21k_ft_in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to 
allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(300, 300) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_inception_v3_vaiq/model.py b/e2eshark/onnx/models/tf_inception_v3_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/tf_inception_v3_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the 
rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_mobilenetv3_large_minimal_100.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_mobilenetv3_large_minimal_100.in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tf_mobilenetv3_large_minimal_100.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# 
gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_mobilenetv3_small_075.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_mobilenetv3_small_075.in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tf_mobilenetv3_small_075.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + 
+print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_mobilenetv3_small_100.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_mobilenetv3_small_100.in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tf_mobilenetv3_small_100.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tf_mobilenetv3_small_minimal_100.in1k_vaiq/model.py b/e2eshark/onnx/models/tf_mobilenetv3_small_minimal_100.in1k_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/tf_mobilenetv3_small_minimal_100.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tinynet_a.in1k_vaiq/model.py b/e2eshark/onnx/models/tinynet_a.in1k_vaiq/model.py new file mode 100644 index 000000000..aefb3a075 --- /dev/null +++ b/e2eshark/onnx/models/tinynet_a.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image 
+ +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(192, 192) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tinynet_d.in1k_vaiq/model.py b/e2eshark/onnx/models/tinynet_d.in1k_vaiq/model.py new file mode 100644 index 000000000..89e8e5518 --- /dev/null +++ b/e2eshark/onnx/models/tinynet_d.in1k_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# 
Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(152, 152) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tv_densenet121_vaiq/model.py b/e2eshark/onnx/models/tv_densenet121_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tv_densenet121_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: 
model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tv_resnet101_vaiq/model.py b/e2eshark/onnx/models/tv_resnet101_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tv_resnet101_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tv_resnet152_vaiq/model.py b/e2eshark/onnx/models/tv_resnet152_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- 
/dev/null +++ b/e2eshark/onnx/models/tv_resnet152_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tv_resnet34_vaiq/model.py b/e2eshark/onnx/models/tv_resnet34_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tv_resnet34_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an 
instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tv_resnet50_vaiq/model.py b/e2eshark/onnx/models/tv_resnet50_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tv_resnet50_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the 
inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/tv_resnext50_32x4d_vaiq/model.py b/e2eshark/onnx/models/tv_resnext50_32x4d_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/tv_resnext50_32x4d_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] 
+E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/vgg11_bn_vaiq/model.py b/e2eshark/onnx/models/vgg11_bn_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/vgg11_bn_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/vgg11_vaiq/model.py b/e2eshark/onnx/models/vgg11_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ 
b/e2eshark/onnx/models/vgg11_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/vgg13_bn_vaiq/model.py b/e2eshark/onnx/models/vgg13_bn_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/vgg13_bn_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test 
+E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/vgg13_vaiq/model.py b/e2eshark/onnx/models/vgg13_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/vgg13_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 
+# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/vgg16_bn_vaiq/model.py b/e2eshark/onnx/models/vgg16_bn_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/vgg16_bn_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = 
[torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/vgg16_vaiq/model.py b/e2eshark/onnx/models/vgg16_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/vgg16_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/vgg19_bn_vaiq/model.py b/e2eshark/onnx/models/vgg19_bn_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/vgg19_bn_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from 
e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/vgg19_vaiq/model.py b/e2eshark/onnx/models/vgg19_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/vgg19_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the 
tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/wide_resnet101_2_vaiq/model.py b/e2eshark/onnx/models/wide_resnet101_2_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/wide_resnet101_2_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = 
to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/wide_resnet50_2_test_vaiq/model.py b/e2eshark/onnx/models/wide_resnet50_2_test_vaiq/model.py new file mode 100644 index 000000000..f221670c0 --- /dev/null +++ b/e2eshark/onnx/models/wide_resnet50_2_test_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(288, 288) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in 
model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/wide_resnet50_2_vaiq/model.py b/e2eshark/onnx/models/wide_resnet50_2_vaiq/model.py new file mode 100644 index 000000000..54fc5be6a --- /dev/null +++ b/e2eshark/onnx/models/wide_resnet50_2_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image() + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/xception41_vaiq/model.py b/e2eshark/onnx/models/xception41_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/xception41_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from 
e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/xception41p_vaiq/model.py b/e2eshark/onnx/models/xception41p_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/xception41p_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called 
model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/xception65_vaiq/model.py b/e2eshark/onnx/models/xception65_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/xception65_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = 
to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/xception65p_vaiq/model.py b/e2eshark/onnx/models/xception65p_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/xception65p_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", 
E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/xception71_vaiq/model.py b/e2eshark/onnx/models/xception71_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/xception71_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"]) diff --git a/e2eshark/onnx/models/xception_vaiq/model.py b/e2eshark/onnx/models/xception_vaiq/model.py new file mode 100644 index 000000000..ef74b88a5 --- /dev/null +++ b/e2eshark/onnx/models/xception_vaiq/model.py @@ -0,0 +1,43 @@ +import numpy, torch, sys +import onnxruntime + +# import from e2eshark/tools to allow running in current 
dir, for run through +# run.pl, commutils is symbolically linked to allow any rundir to work +sys.path.insert(0, "../../../tools/stubs") +from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image + +# Create an instance of it for this test +E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF) + + +# The generated or checked in onnx file must always be called model.onnx +# the tools/stubs/onnxmodel.py is appended to model.py +# to form runmodel.py in the rundirectory which is then taken +# through flow + + +# start an onnxrt session +session = onnxruntime.InferenceSession("model.onnx", None) + +# Even if model is quantized, the inputs and outputs are +# not, so apply float32 +# Get and process the image +img_ycbcr = setup_test_image(299, 299) + +model_input_X = to_numpy(img_ycbcr) + +# gets X in inputs[0] and Y in inputs[1] +inputs = session.get_inputs() +# gets Z in outputs[0] +outputs = session.get_outputs() + + +model_output = session.run( + [outputs[0].name], + {inputs[0].name: model_input_X}, +)[0] +E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)] +E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output] + +print("Input:", E2ESHARK_CHECK["input"]) +print("Output:", E2ESHARK_CHECK["output"])