From d14ed023523b3337e578faf06d932d186eb4cd2c Mon Sep 17 00:00:00 2001 From: "Jim Wang @ Intel" Date: Tue, 5 Nov 2024 10:54:40 -0700 Subject: [PATCH] feat: add/update benchmark of loss prevention pipelines documentation and make targets (#16) * feat: add/update benchmark of loss prevention pipelines documentation and make targets CLOSES: #15 Signed-off-by: Jim Wang * docs: update benchmark.md documentation to add more parameters Signed-off-by: Jim Wang * fix: address Sean's PR comments on typos Signed-off-by: Jim Wang --------- Signed-off-by: Jim Wang --- .gitignore | 3 ++- Makefile | 15 ++++++++++-- README.md | 5 ++++ benchmark.md | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 benchmark.md diff --git a/.gitignore b/.gitignore index 0f53dcd..bca876e 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,3 @@ models/ -__pycache__ \ No newline at end of file +__pycache__ +results/ \ No newline at end of file diff --git a/Makefile b/Makefile index 8fe74e9..ccef652 100644 --- a/Makefile +++ b/Makefile @@ -4,6 +4,7 @@ .PHONY: build build-realsense run down .PHONY: build-telegraf run-telegraf run-portainer clean-all clean-results clean-telegraf clean-models down-portainer .PHONY: download-models clean-test run-demo run-headless download-yolov8s +.PHONY: clean-benchmark-results MKDOCS_IMAGE ?= asc-mkdocs PIPELINE_COUNT ?= 1 @@ -11,6 +12,7 @@ TARGET_FPS ?= 14.95 DOCKER_COMPOSE ?= docker-compose.yml RESULTS_DIR ?= $(PWD)/results RETAIL_USE_CASE_ROOT ?= $(PWD) +BENCHMARK_DURATION ?= 45 download-models: download-yolov8s ./download_models/downloadModels.sh @@ -88,10 +90,19 @@ build-benchmark: cd performance-tools && $(MAKE) build-benchmark-docker benchmark: build-benchmark download-models - cd performance-tools/benchmark-scripts && python benchmark.py --compose_file ../../src/docker-compose.yml --pipeline $(PIPELINE_COUNT) + cd performance-tools/benchmark-scripts && python benchmark.py --compose_file 
../../src/docker-compose.yml \ + --pipeline $(PIPELINE_COUNT) --duration $(BENCHMARK_DURATION) --results_dir $(RESULTS_DIR) +# consolidate to show the summary csv + @cd performance-tools/benchmark-scripts && ROOT_DIRECTORY=$(RESULTS_DIR) $(MAKE) --no-print-directory consolidate && \ + echo "Loss Prevention benchmark results are saved in $(RESULTS_DIR)/summary.csv file" && \ + echo "====== Loss prevention benchmark results summary: " && cat $(RESULTS_DIR)/summary.csv benchmark-stream-density: build-benchmark download-models - cd performance-tools/benchmark-scripts && python benchmark.py --compose_file ../../src/docker-compose.yml --target_fps $(TARGET_FPS) --density_increment 1 --results_dir $(RESULTS_DIR) + cd performance-tools/benchmark-scripts && python benchmark.py --compose_file ../../src/docker-compose.yml \ + --target_fps $(TARGET_FPS) --density_increment 1 --results_dir $(RESULTS_DIR) + +clean-benchmark-results: + cd performance-tools/benchmark-scripts && rm -rf $(RESULTS_DIR)/* || true build-telegraf: cd telegraf && $(MAKE) build diff --git a/README.md b/README.md index 315f0cd..8b3bd4f 100644 --- a/README.md +++ b/README.md @@ -34,6 +34,11 @@ stop containers: make down ``` +## Pipeline Benchmark + +Go here for [the documentation of loss prevention pipeline benchmark](./benchmark.md) + + ## [Advanced Documentation](https://intel-retail.github.io/documentation/use-cases/loss-prevention/loss-prevention.html) ## Join the community diff --git a/benchmark.md b/benchmark.md new file mode 100644 index 0000000..26e94cd --- /dev/null +++ b/benchmark.md @@ -0,0 +1,68 @@ +# Benchmark Loss Prevention Pipelines + +## Prerequisites for benchmark tools + +Before running any benchmark on the loss prevention pipelines, please check the [prerequisites documentation of performance-tool benchmark](https://github.com/intel-retail/documentation/blob/main/docs_src/performance-tools/benchmark.md#prerequisites) and make sure you have those set up and installed. 
+ +## Run Benchmark Use Cases + +When in the loss-prevention project base directory, make sure you have the latest code from the performance-tools repo by running the following command: + +```bash +make update-submodules +``` + +and then build the whole benchmark tools: + +```bash +make build-benchmark +``` + +Once benchmark tools are built, there are two categories of benchmarks to run: + +1. Benchmarking the loss-prevention running pipelines: + +```bash +make benchmark +``` + +!!! Note + For more details on how this works, you can check the documentation of performance-tools in the [Benchmark a CV Pipeline](https://github.com/intel-retail/documentation/blob/main/docs_src/performance-tools/benchmark.md#benchmark-a-cv-pipeline) section. + +1. Benchmarking the stream density of the loss-prevention pipelines: + +```bash +make benchmark-stream-density +``` + +!!! Note + For more details on how this works, you can check the documentation of performance-tools in the [Benchmark Stream Density for CV Pipelines](https://github.com/intel-retail/documentation/blob/main/docs_src/performance-tools/benchmark.md#benchmark-stream-density-for-cv-pipelines) section. 
+ +## Tuning Benchmark Parameters + +You can tune some benchmark parameters when you benchmark loss-prevention pipelines: + +| Parameter Name | Default Value | Description | +| -----------------------|-----------------|----------------------------------------------------------------------| +| PIPELINE_COUNT | 1 | number of loss-prevention pipelines to launch for benchmarking | +| BENCHMARK_DURATION | 45 | the time period, in seconds, that the benchmark will run | +| TARGET_FPS | 14.95 | used for stream density, maintaining that target frames per second (fps) while having the maximum number of pipelines running | +| RESULTS_DIR | ./results | the directory of the outputs for running pipeline logs and fps info | +| PIPELINE_SCRIPT | yolov5s.sh | the script to run the pipeline; for yolov8, you can use yolov8s_roi.sh for running the region of interest pipeline | +| RENDER_MODE | 0 | when it is set to 1, another popup window will display the input source video and some of the inferencing results like bounding boxes and/or regions of interest | + +As an example, the following command with parameter `PIPELINE_COUNT` will do the benchmark for 2 loss-prevention pipelines: + +```bash + PIPELINE_COUNT=2 make benchmark +``` + +## Clean up + +To clean up all the benchmark results, run the command: + +```bash +make clean-benchmark-results +``` + +This comes in handy when you want to have a new set of benchmarks for different benchmark use cases like different pipelines or running duration.