Commit dcb7731
add support for config specific runtime flags
saienduri committed Apr 18, 2024
1 parent 4cc7682 commit dcb7731
Showing 8 changed files with 14 additions and 21 deletions.
iree_tests/configs/config_gpu_rocm_models.json (1 addition, 1 deletion)
@@ -11,7 +11,7 @@
"--device=hip"
],
"skip_compile_tests": [],
"skip_run_tests": ["pytorch/models/sdxl-scheduled-unet-3-bench"],
"skip_run_tests": [],
"expected_compile_failures": [],
"expected_run_failures": []
}
iree_tests/conftest.py (4 additions, 0 deletions)
@@ -266,6 +266,10 @@ def collect(self):
         # try pytest-dependency or pytest-depends
         for test_case in test_cases:
             test_name = config_name + "_" + test_case.name
+            # use config specific runtime flagfile if it exists
+            config_specific_flagfile = test_directory + "/" + test_case.runtime_flagfile + "_" + config_name
+            if os.path.exists(config_specific_flagfile):
+                test_case.runtime_flagfile = config_specific_flagfile
             spec = IreeCompileAndRunTestSpec(
                 test_directory=test_directory,
                 input_mlir_name=self.path.name,
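For readers skimming the diff, here is a standalone sketch of the lookup that the four added lines perform, assuming only what the hunk shows; the function name and the example config/file names below are hypothetical, not taken from this commit.

import os

def resolve_runtime_flagfile(test_directory, runtime_flagfile, config_name):
    # Prefer "<runtime_flagfile>_<config_name>" inside the test directory if it exists;
    # otherwise keep the default flagfile name unchanged.
    config_specific_flagfile = test_directory + "/" + runtime_flagfile + "_" + config_name
    if os.path.exists(config_specific_flagfile):
        return config_specific_flagfile
    return runtime_flagfile

# Hypothetical example: for a config named "gpu_rocm_models", a file named
# "run_module_flags.txt_gpu_rocm_models" placed next to the test would override
# the default "run_module_flags.txt" for that config only.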

This file was deleted.

Empty file.
@@ -1,5 +1,5 @@
 --parameters=model=real_weights.irpa
---module=../sdxl-scheduled-unet-3-bench/model_rocm_llvm_task_real_weights.vmfb
+--module=sdxl_scheduled_unet_pipeline_fp16_cpu.vmfb
 --input=1x4x128x128xf16=@inference_input.0.bin
 --input=2x64x2048xf16=@inference_input.1.bin
 --input=2x1280xf16=@inference_input.2.bin
@@ -0,0 +1,8 @@
+--parameters=model=real_weights.irpa
+--module=sdxl_scheduled_unet_pipeline_fp16_rocm.vmfb
+--input=1x4x128x128xf16=@inference_input.0.bin
+--input=2x64x2048xf16=@inference_input.1.bin
+--input=2x1280xf16=@inference_input.2.bin
+--input=1xf16=@inference_input.3.bin
+--expected_output=1x4x128x128xf16=@inference_output.0.bin
+--expected_f16_threshold=0.8f
Binary file not shown.
Binary file not shown.
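The flagfiles above are plain text with one flag per line. Purely as an illustration of how a config-specific flagfile ends up on a command line (the helper and file names are hypothetical, and the actual invocation is driven by the test suite's conftest.py, not by this snippet), such a file could be expanded into an iree-run-module call like this:

import subprocess

def run_with_flagfile(flagfile_path, extra_flags=()):
    # Read one flag per line, skip blank lines, and forward everything to iree-run-module.
    with open(flagfile_path) as f:
        flags = [line.strip() for line in f if line.strip()]
    return subprocess.run(["iree-run-module", *list(extra_flags), *flags], check=True)

# Hypothetical usage for a ROCm run that picked up a config-specific flagfile:
# run_with_flagfile("run_module_flags.txt_gpu_rocm_models", ["--device=hip"])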
