Add p0p1 int8 model.py (#304)
 - onnx-import  : 486
 - iree-compile : 433
 - inference    : 352
AmosLewis authored Jul 26, 2024
1 parent ebb3f5a commit 3b3bb48
Showing 459 changed files with 19,421 additions and 8 deletions.
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/adv_inception_v3_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(299, 299)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/cs3darknet_focus_l_train_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(256, 256)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/cs3darknet_focus_l_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(288, 288)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/cs3darknet_focus_m_train_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(256, 256)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/cs3darknet_focus_m_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(288, 288)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/cs3darknet_l_train_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(256, 256)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/cs3darknet_l_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(288, 288)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/cs3darknet_m_train_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(256, 256)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])
43 changes: 43 additions & 0 deletions e2eshark/onnx/models/cs3darknet_m_vaiq/model.py
@@ -0,0 +1,43 @@
import numpy, torch, sys
import onnxruntime

# Import from e2eshark/tools/stubs to allow running in the current dir; for runs
# through run.pl, commonutils is symbolically linked so any run dir works.
sys.path.insert(0, "../../../tools/stubs")
from commonutils import E2ESHARK_CHECK_DEF, to_numpy, setup_test_image

# Create an instance of it for this test
E2ESHARK_CHECK = dict(E2ESHARK_CHECK_DEF)


# The generated or checked-in ONNX file must always be named model.onnx.
# tools/stubs/onnxmodel.py is appended to this model.py to form runmodel.py
# in the run directory, which is then taken through the test flow.


# start an onnxrt session
session = onnxruntime.InferenceSession("model.onnx", None)

# Even if the model is quantized, its inputs and outputs are
# not, so the input stays float32.
# Get and preprocess the test image.
img_ycbcr = setup_test_image(288, 288)

model_input_X = to_numpy(img_ycbcr)

# The image input is inputs[0]
inputs = session.get_inputs()
# The model output is outputs[0]
outputs = session.get_outputs()


model_output = session.run(
[outputs[0].name],
{inputs[0].name: model_input_X},
)[0]
E2ESHARK_CHECK["input"] = [torch.from_numpy(model_input_X)]
E2ESHARK_CHECK["output"] = [torch.from_numpy(arr) for arr in model_output]

print("Input:", E2ESHARK_CHECK["input"])
print("Output:", E2ESHARK_CHECK["output"])