diff --git a/benchmark-scripts/Dockerfile.xpu b/benchmark-scripts/Dockerfile.xpu index e727bcc..ebb6935 100644 --- a/benchmark-scripts/Dockerfile.xpu +++ b/benchmark-scripts/Dockerfile.xpu @@ -48,7 +48,7 @@ WORKDIR /tmp/work RUN apt-get update && \ apt-get install -y --no-install-recommends wget gnupg2 ca-certificates && \ wget -qO - https://repositories.intel.com/graphics/intel-graphics.key | gpg --dearmor --output /usr/share/keyrings/intel-graphics.gpg && \ - wget https://github.com/intel/xpumanager/releases/download/V1.2.3/xpumanager_1.2.3_20230221.054746.0e2d4bfb+ubuntu22.04_amd64.deb && \ + wget https://github.com/intel/xpumanager/releases/download/V1.2.24/xpumanager_1.2.24_20231120.070911.ddc18e8a.u22.04_amd64.deb && \ echo 'deb [arch=amd64 signed-by=/usr/share/keyrings/intel-graphics.gpg] https://repositories.intel.com/graphics/ubuntu jammy flex' | \ tee /etc/apt/sources.list.d/intel.gpu.jammy.list && \ apt-get update && \ diff --git a/benchmark-scripts/Makefile b/benchmark-scripts/Makefile new file mode 100644 index 0000000..d1e72c2 --- /dev/null +++ b/benchmark-scripts/Makefile @@ -0,0 +1,20 @@ + +build-all: build-benchmark build-xpu build-igt + +build-benchmark: + echo "Building benchmark container HTTPS_PROXY=${HTTPS_PROXY} HTTP_PROXY=${HTTP_PROXY}" + docker build --build-arg HTTPS_PROXY=${HTTPS_PROXY} --build-arg HTTP_PROXY=${HTTP_PROXY} -t benchmark:dev -f Dockerfile.benchmark . + +build-xpu: + echo "Building xpu HTTPS_PROXY=${HTTPS_PROXY} HTTP_PROXY=${HTTP_PROXY}" + docker build --build-arg HTTPS_PROXY=${HTTPS_PROXY} --build-arg HTTP_PROXY=${HTTP_PROXY} -t benchmark:xpu -f Dockerfile.xpu . + +build-igt: + echo "Building igt HTTPS_PROXY=${HTTPS_PROXY} HTTP_PROXY=${HTTP_PROXY}" + docker build --build-arg HTTPS_PROXY=${HTTPS_PROXY} --build-arg HTTP_PROXY=${HTTP_PROXY} -t benchmark:igt -f Dockerfile.igt . 
+ +run: + docker run -it --rm -v /var/run/docker.sock:/var/run/docker.sock -v `pwd`/results:/tmp/results --net=host --privileged benchmark:dev /bin/bash + +consolidate: + docker run -itd -v `pwd`/$(ROOT_DIRECTORY):/$(ROOT_DIRECTORY) -e ROOT_DIRECTORY=$(ROOT_DIRECTORY) --net=host --privileged benchmark:dev /bin/bash -c "python3 consolidate_multiple_run_of_metrics.py --root_directory $(ROOT_DIRECTORY)/ --output $(ROOT_DIRECTORY)/summary.csv" \ No newline at end of file diff --git a/benchmark-scripts/benchmark.sh b/benchmark-scripts/benchmark.sh index b09f08e..884e9cf 100755 --- a/benchmark-scripts/benchmark.sh +++ b/benchmark-scripts/benchmark.sh @@ -12,26 +12,22 @@ error() { show_help() { echo " - usage: $0 + usage: PIPELINE_PROFILE=\"object_detection\" (or others from make list-profiles) sudo -E ./$0 --performance_mode the system performance setting [powersave | performance] - --pipelines NUMBER_OF_PIPELINES | --stream_density TARGET_FPS + --pipelines NUMBER_OF_PIPELINES | --stream_density TARGET_FPS [PIPELINE_INCREMENT] --logdir FULL_PATH_TO_DIRECTORY --duration SECONDS (not needed when --stream_density is specified) --init_duration SECONDS --platform core|xeon|dgpu.x - --inputsrc RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 - [--classification_disabled] - [ --ocr_disabled | --ocr [OCR_INTERVAL OCR_DEVICE] ] - [ --barcode_disabled | --barcode [BARCODE_INTERVAL] ] - [--realsense_enabled] + --inputsrc RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 Note: 1. dgpu.x should be replaced with targetted GPUs such as dgpu (for all GPUs), dgpu.0, dgpu.1, etc 2. filesrc will utilize videos stored in the sample-media folder 3. Set environment variable STREAM_DENSITY_MODE=1 for starting single container stream density testing 4. Set environment variable RENDER_MODE=1 for displaying pipeline and overlay CV metadata - 5. 
Stream density can take two parameters: first one is for target fps, a float type value, and - the second one is increment integer of pipelines and is optional (in which case the increments will be dynamically adjusted internally) + 5. Stream density can take two parameters: first one TARGET_FPS is for target fps, a float type value, and + the second one PIPELINE_INCREMENT is increment integer of pipelines and is optional (in which case the increments will be dynamically adjusted internally) " } @@ -138,14 +134,10 @@ get_options() { # USAGE: # 1. PLATFORM: core|xeon|dgpu.x # 2. INPUT SOURCE: RS_SERIAL_NUMBER|CAMERA_RTSP_URL|file:video.mp4|/dev/video0 -# 3. CLASSIFICATION: enabled|disabled -# 4. OCR: disabled|OCR_INTERVAL OCR_DEVICE -# 5. BARCODE: disabled|BARCODE_INTERVAL -# 6. REALSENSE: enabled|disabled -# 7. PIPELINE_NUMBER: the number of pipelines to start or specify MAX and a stream density benchmark will be performed with a 15 fps target per pipeline -# 8. LOG_DIRECTORY: the location to store all the log files. The consolidation script will look for directories within the top level directory and process the results in each one so the user will want to keep in mind this structure when creating the log directory. For example, for multiple videos with different number of objects, a log_directory would look like: yolov5s_6330N/object1_mixed. Whatever is meaningful for the test run. -# 9. DURATION: the amount of time to run the data collection -# 10 COMPLETE_INIT_DURATION: the amount of time to allow the system to settle prior to starting the data collection. +# 3. PIPELINE_NUMBER: the number of pipelines to start or specify MAX and a stream density benchmark will be performed with a 15 fps target per pipeline +# 4. LOG_DIRECTORY: the location to store all the log files. 
The consolidation script will look for directories within the top level directory and process the results in each one so the user will want to keep in mind this structure when creating the log directory. For example, for multiple videos with different number of objects, a log_directory would look like: yolov5s_6330N/object1_mixed. Whatever is meaningful for the test run. +# 5. DURATION: the amount of time to run the data collection +# 6 COMPLETE_INIT_DURATION: the amount of time to allow the system to settle prior to starting the data collection. # load benchmark params if [ -z $1 ] @@ -154,7 +146,7 @@ then fi get_options "$@" -# load docker-run params +# load run params shift $OPTIONS_TO_SKIP # the following syntax for arguments is meant to be re-splitting for correctly used on all $DOCKER_RUN_ARGS # shellcheck disable=SC2068 @@ -209,15 +201,32 @@ do NUM_GPU=$GPU_NUM_170 fi - # docker-run needs to run in it's directory for the file paths to work + # run.sh needs to run in it's directory for the file paths to work cd ../ +# pwd - echo "DEBUG: docker-run.sh" "$@" + echo "DEBUG: run.sh" "$@" - for pipelineIdx in $( seq 0 $(($PIPELINE_COUNT - 1)) ) + for pipelineIdx in $( seq 0 $((PIPELINE_COUNT - 1)) ) do - if [ -z "$STREAM_DENSITY_FPS" ]; then - #pushd .. + if [ -z "$STREAM_DENSITY_FPS" ]; then + isCapi=$(docker run --rm -v "${PWD}":/workdir mikefarah/yq '.OvmsSingleContainer' /workdir/configs/opencv-ovms/cmd_client/res/"$PIPELINE_PROFILE"/configuration.yaml) + if [ "$isCapi" = false ] + then + echo "multiple pipelines for non-capi case..." + while true + do + containerCnt=$(docker ps -aq -f name="_ovms_pl" | wc -w) + if [ "$containerCnt" -lt "$pipelineIdx" ] + then + echo "pipelineIdx=$pipelineIdx, containerCnt=$containerCnt" + echo "waiting for the previous pipeline container to start up..." 
+ sleep 1 + else + break + fi + done + fi echo "Starting pipeline$pipelineIdx" if [ "$CPU_ONLY" != 1 ] && ([ "$HAS_FLEX_140" == 1 ] || [ "$HAS_FLEX_170" == 1 ]) then @@ -239,13 +248,13 @@ do break fi done - LOW_POWER=$LOW_POWER DEVICE=$DEVICE ./docker-run.sh "$@" + LOW_POWER=$LOW_POWER DEVICE=$DEVICE ./run.sh "$@" else echo "Error: NUM_GPU is 0, cannot run" exit 1 fi else - CPU_ONLY=$CPU_ONLY LOW_POWER=$LOW_POWER DEVICE=$DEVICE ./docker-run.sh "$@" + CPU_ONLY=$CPU_ONLY LOW_POWER=$LOW_POWER DEVICE=$DEVICE ./run.sh "$@" fi sleep 1 #popd @@ -270,7 +279,7 @@ do # Sync sleep in stream density script and platform metrics data collection script CPU_ONLY=$CPU_ONLY LOW_POWER=$LOW_POWER COMPLETE_INIT_DURATION=$COMPLETE_INIT_DURATION \ STREAM_DENSITY_FPS=$STREAM_DENSITY_FPS STREAM_DENSITY_INCREMENTS=$STREAM_DENSITY_INCREMENTS \ - STREAM_DENSITY_MODE=1 DEVICE=$DEVICE ./docker-run.sh "$@" + STREAM_DENSITY_MODE=1 DEVICE=$DEVICE ./run.sh "$@" #popd fi done @@ -300,61 +309,27 @@ do else echo "Waiting for workload(s) to finish..." waitingMsg=1 - ovmsCase=0 - # figure out which case we are running like either "ovms-server" or "automated-self-checkout" container - mapfile -t sids < <(docker ps -f name=automated-self-checkout -f status=running -q -a) - stream_workload_running=$(echo "${sids[@]}" | wc -w) - if (( $(echo "$stream_workload_running" 0 | awk '{if ($1 == $2) print 1;}') )) - then - # if we don't find any docker container running for dlstreamer (i.e. name with automated-self-checkout) - # then it is ovms running case - echo "running ovms client case..." - ovmsCase=1 - else - echo "running dlstreamer case..." 
- fi # keep looping through until stream density script is done while true do - if [ $ovmsCase -eq 1 ] + # stream density is running from profile-launcer so we check that executing process + stream_density_running=$(pgrep -fa "stream_density.sh") + if [ -z "$stream_density_running" ] then - stream_density_running=$(docker exec ovms-client0 bash -c 'ps -aux | grep "stream_density_framework-pipelines.sh" | grep -v grep') - if [ -z "$stream_density_running" ] + # when the stream-density is done, we should clean up the profile-launcer process + proifleLauncherPid=$(pgrep -f "profile-launcher") + if [ -n "$proifleLauncherPid" ] then - # when stream density script process is done, we need to kill the ovms-client0 container as it keeps running forever - echo "killing ovms-client0 docker container..." - docker rm ovms-client0 -f - break - else - if [ $waitingMsg -eq 1 ] - then - echo "stream density script is still running..." - waitingMsg=0 - fi + pkill -P "$proifleLauncherPid" + echo "profile-launcher is done" fi + break else - # since there is no longer --rm automatically remove docker-run containers - # we want to remove those first if any: - exitedIds=$(docker ps -f name=automated-self-checkout -f status=exited -q -a) - if [ -n "$exitedIds" ] + if [ $waitingMsg -eq 1 ] then - docker rm "$exitedIds" - fi - - mapfile -t sids < <(docker ps --filter="name=automated-self-checkout" -q -a) - #echo "sids: " "${sids[@]}" - stream_workload_running=$(echo "${sids[@]}" | wc -w) - #echo "stream workload_running: $stream_workload_running" - if (( $(echo "$stream_workload_running" 0 | awk '{if ($1 == $2) print 1;}') )) - then - break - else - if [ $waitingMsg -eq 1 ] - then - echo "stream density script is still running..." - waitingMsg=0 - fi + echo "stream density script is still running..." + waitingMsg=0 fi fi # there are still some pipeline running containers, waiting for them to be finished... 
diff --git a/benchmark-scripts/camera-simulator.sh b/benchmark-scripts/camera-simulator.sh index bdca8f0..9f5d084 100755 --- a/benchmark-scripts/camera-simulator.sh +++ b/benchmark-scripts/camera-simulator.sh @@ -21,6 +21,8 @@ if [ "${COMMAND,,}" = "start" ]; then if [ -z "$CAMERAS" ]; then CAMERAS=${#FILES[@]} fi + + cd $SOURCE_DIR/camera-simulator docker run --rm -t --network=host --name camera-simulator aler9/rtsp-simple-server >rtsp_simple_server.log.txt 2>&1 & index=0 diff --git a/benchmark-scripts/collect_platform_metrics.sh b/benchmark-scripts/collect_platform_metrics.sh index 8d6c7a0..0529c16 100755 --- a/benchmark-scripts/collect_platform_metrics.sh +++ b/benchmark-scripts/collect_platform_metrics.sh @@ -114,7 +114,7 @@ then else if [ "$is_xeon" == "1" ] then - # this script is actually called by ./benchmark.sh shell script through the docker container benchmark:dev, so we don't need another docker-run here any more + # this script is actually called by ./benchmark.sh shell script through the docker container benchmark:dev, so we don't need another docker run here any more timeout "$DURATION" "$PCM_DIRECTORY"/pcm-memory 1 -silent -nc -csv="$LOG_DIRECTORY"/memory_bandwidth.csv & fi fi diff --git a/benchmark-scripts/download_sample_videos.sh b/benchmark-scripts/download_sample_videos.sh index 1ebf4a0..b51b8b8 100755 --- a/benchmark-scripts/download_sample_videos.sh +++ b/benchmark-scripts/download_sample_videos.sh @@ -9,4 +9,5 @@ ./format_avc_mp4.sh coca-cola-4465029.mp4 https://www.pexels.com/video/4465029/download/ "$1" "$2" "$3" ./format_avc_mp4.sh vehicle-bike.mp4 https://www.pexels.com/video/853908/download/ "$1" "$2" "$3" #./format_avc_mp4.sh grocery-items-on-the-kitchen-shelf-4983686.mp4 https://www.pexels.com/video/4983686/download/ $1 $2 $3 -#./format_avc_mp4.sh couple-paying-at-the-counter-in-the-grocery-4121754.mp4 https://www.pexels.com/video/4121754/download/ +./format_avc_mp4.sh video_of_people_walking_855564.mp4 
https://www.pexels.com/video/855564/download/ "$1" "$2" "$3" +./format_avc_mp4.sh barcode.mp4 https://github.com/antoniomtz/sample-clips/raw/main/barcode.mp4 "$1" "$2" "$3" diff --git a/benchmark-scripts/format_avc_mp4.sh b/benchmark-scripts/format_avc_mp4.sh index 7ed4a0b..4fb9b64 100755 --- a/benchmark-scripts/format_avc_mp4.sh +++ b/benchmark-scripts/format_avc_mp4.sh @@ -16,8 +16,8 @@ show_help() { " } -WIDTH=3840 -HEIGHT=2160 +WIDTH=1920 +HEIGHT=1080 FPS=15 if [ -z "$2" ] @@ -66,19 +66,6 @@ then exit 0 fi -FIND_IMAGE_SOC=$(docker images | grep "sco-soc") -FIND_IMAGE_DGPU=$(docker images | grep "sco-dgpu") -if [ -z "$FIND_IMAGE_SOC" ] && [ -z "$FIND_IMAGE_DGPU" ] -then - echo "ERROR: Can not find docker image sco-soc or sco-dgpu, please build image first!" - exit 1 -elif [ ! -z "$FIND_IMAGE_DGPU" ] -then - TAG=sco-dgpu:2.0 -else - TAG=sco-soc:2.0 -fi - if [ ! -f ../sample-media/$1 ] && [ ! -f ../sample-media/$result ] then wget -O ../sample-media/$1 $2 @@ -98,7 +85,7 @@ SAMPLE_MEDIA_DIR="$PWD"/../sample-media docker run --network host --privileged --user root --ipc=host -e VIDEO_FILE="$1" -e DISPLAY=:0 \ -v /tmp/.X11-unix:/tmp/.X11-unix \ -v "$SAMPLE_MEDIA_DIR"/:/vids \ - -w /vids -it --rm "$TAG" \ + -w /vids -it --rm intel/dlstreamer:2023.0.0-ubuntu22-gpu682-dpcpp \ bash -c "if [ -f /vids/$result ]; then exit 1; else gst-launch-1.0 filesrc location=/vids/$1 ! qtdemux ! h264parse ! vaapih264dec ! vaapipostproc width=$WIDTH height=$HEIGHT ! videorate ! 'video/x-raw, framerate=$FPS/1' ! vaapih264enc ! h264parse ! mp4mux ! 
filesink location=/vids/$result; fi" rm ../sample-media/"$1" diff --git a/benchmark-scripts/requirements-xpu.txt b/benchmark-scripts/requirements-xpu.txt index b1987f3..a1ba12a 100644 --- a/benchmark-scripts/requirements-xpu.txt +++ b/benchmark-scripts/requirements-xpu.txt @@ -1,8 +1,8 @@ -Flask==2.3.3 +Flask==3.0.0 Flask-HTTPAuth==4.8.0 requests==2.31.0 -prometheus-client==0.17.1 -grpcio==1.58.0 -protobuf==4.24.3 +prometheus-client==0.19.0 +grpcio==1.60.2 +protobuf==4.25.1 marshmallow==3.20.1 gunicorn[gthread]==21.2.0 \ No newline at end of file diff --git a/benchmark-scripts/requirements.txt b/benchmark-scripts/requirements.txt index 2a064f7..8564817 100644 --- a/benchmark-scripts/requirements.txt +++ b/benchmark-scripts/requirements.txt @@ -1,3 +1,3 @@ natsort==8.4.0 -numpy==1.26.0 -pandas==2.1.1 +numpy==1.26.2 +pandas==2.1.4 diff --git a/benchmark-scripts/smoke_test_benchmark.sh b/benchmark-scripts/smoke_test_benchmark.sh new file mode 100755 index 0000000..fe87b47 --- /dev/null +++ b/benchmark-scripts/smoke_test_benchmark.sh @@ -0,0 +1,66 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# initial setup +( + cd .. 
+ make clean-all + sleep 3 + make build-dlstreamer +) + +# build benchmark Docker images: +make + +# Download media +./download_sample_videos.sh + +PLATFORM=$1 +CPU_ONLY=$2 + +if [ -z "$PLATFORM" ] +then + PLATFORM="core" +fi + +if [ -z "$CPU_ONLY" ] +then + CPU_ONLY=0 +fi + +sudo rm -rf results || true +sudo rm -rf platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_gst/ || true +sudo rm -rf platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_capi_yolov5_ensemble/ || true +sudo rm -rf platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_object_detection/ || true +sudo rm -rf platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_stream_density/ || true + +# Note: all of benchmarking pipelines are run with RENDER_MODE=0 for better performance without spending extra resources for rendering +# Camera simulator full pipeline +# shell check has false postive on SC2097 and SC2098 as it doesn't detect -E flag which is inhereted the envs into subprocess; hence disable here +# shellcheck disable=SC2097,SC2098 +PIPELINE_PROFILE="gst" CPU_ONLY="$CPU_ONLY" RENDER_MODE=0 sudo -E ./benchmark.sh --pipelines 1 --logdir platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_gst/data --init_duration 30 --duration 60 --platform "$PLATFORM" --inputsrc rtsp://127.0.0.1:8554/camera_0 +# consolidate results +make consolidate ROOT_DIRECTORY=platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_gst + +# Camera simulator for capi_yolov5_ensemble +# shell check has false postive on SC2097 and SC2098 as it doesn't detect -E flag which is inhereted the envs into subprocess; hence disable here +# shellcheck disable=SC2097,SC2098 +PIPELINE_PROFILE="capi_yolov5_ensemble" CPU_ONLY=$CPU_ONLY RENDER_MODE=0 sudo -E ./benchmark.sh --pipelines 1 --logdir platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_capi_yolov5_ensemble/data --init_duration 30 --duration 60 --platform "$PLATFORM" --inputsrc 
rtsp://127.0.0.1:8554/camera_0 +# consolidate results +make consolidate ROOT_DIRECTORY=platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_capi_yolov5_ensemble + +# Camera simulator yolov5 only +# shell check has false postive on SC2097 and SC2098 as it doesn't detect -E flag which is inhereted the envs into subprocess; hence disable here +# shellcheck disable=SC2097,SC2098 +PIPELINE_PROFILE="object_detection" CPU_ONLY=$CPU_ONLY RENDER_MODE=0 sudo -E ./benchmark.sh --pipelines 1 --logdir platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_object_detection/data --init_duration 30 --duration 60 --platform "$PLATFORM" --inputsrc rtsp://127.0.0.1:8554/camera_0 +# consolidate results +make consolidate ROOT_DIRECTORY=platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_camera_simulator_object_detection + +# Stream density for object detection +# shell check has false postive on SC2097 and SC2098 as it doesn't detect -E flag which is inhereted the envs into subprocess; hence disable here +# shellcheck disable=SC2097,SC2098 +PIPELINE_PROFILE="object_detection" CPU_ONLY=$CPU_ONLY RENDER_MODE=0 sudo -E ./benchmark.sh --stream_density 60 --logdir platform_"$PLATFORM"_cpuonly_"$CPU_ONLY"_smoke_test_stream_density/data --init_duration 30 --duration 60 --platform "$PLATFORM" --inputsrc rtsp://127.0.0.1:8554/camera_0 diff --git a/benchmark-scripts/stream_density.sh b/benchmark-scripts/stream_density.sh index 4e59580..91c2630 100755 --- a/benchmark-scripts/stream_density.sh +++ b/benchmark-scripts/stream_density.sh @@ -5,43 +5,305 @@ # SPDX-License-Identifier: Apache-2.0 # -CAMERA_ID=$1 -DEVICE=$2 -TARGET_FPS=14.5 +# this function cleans up parent process and its child processes +# the first input is the parent process to be cleaned up +cleanupPipelineProcesses() +{ + pidToKill=$1 + childPids=$(pgrep -P "$pidToKill") + echo "decrementing pipelines and to kill pid $pidToKill" >> "$log" + waitForChildPidKilled=0 + if [ -z "$childPids" ] + then + echo 
"for parent pid $pidToKill, there is no child pids to kill" >> "$log" + else + echo "parent pid $pidToKill with childPids $childPids to be killed" >> "$log" + waitForChildPidKilled=1 + fi + + # kill the parent process with PID $pidToKill + pkill -P "$pidToKill" + + # make sure all child pids are gone before proceed + MAX_PID_WAITING_COUNT=10 + waitingCnt=0 + while [ $waitForChildPidKilled -eq 1 ] + do + numExistingChildren=0 + for childPid in $childPids + do + if ps -p "$childPid" > /dev/null + then + echo "child pid: $childPid exists" >> "$log" + numExistingChildren=$(( numExistingChildren + 1 )) + + processCmd=$(ps -p "$childPid" -o cmd=) + echo "DEBUG: pipeline process is $processCmd" >> "$log" + # for those child process is running in docker run, + # then we use docker rm to delete the process + isDockerRun=$(echo "$processCmd" | grep "docker run") + if [ -n "$isDockerRun" ] + then + containerName=$(echo "$processCmd" | awk -F "CONTAINER_NAME=" '{print $2}' | awk '{print $1}') + if [ -n "$containerName" ] + then + echo "terminating docker process with CONTAINER_NAME $containerName..." >> "$log" + docker rm "$containerName" -f + else + echo "Docker run process without CONTAINER_NAME: skip...." >> "$log" + fi + fi + # if not docker run process, use kill to terminate it when it exceeds the maiximum waitingCnt + # this also tries to kill the docker process without container name + if [ $waitingCnt -ge $MAX_PID_WAITING_COUNT ] + then + echo "exceeding the max. pid waiting count $MAX_PID_WAITING_COUNT, kill it directly..." 
>> "$log" + kill -9 "$childPid" + waitingCnt=0 + fi + else + echo "no child pid exists $childPid" + fi + done + + if [ $numExistingChildren -eq 0 ] + then + echo "all child processes for $pidToKill are cleaned up" + break + else + waitingCnt=$(( waitingCnt + 1 )) + fi + done + + # check the parent process is gone before proceed + while ps -p "$pidToKill" > /dev/null + do + echo "$pidToKill is still running" + sleep 1 + done + echo "done with clean up parent process $pidToKill" +} + +TARGET_FPS=15 MEETS_FPS=true -#start one stream, check fps, if good, stop the server and start two streams. +INIT_DURATION=120 +MAX_GUESS_INCREMENTS=5 num_pipelines=1 +increments=1 +RESULT_DIR="${RESULT_DIR:=$RUN_PATH/results}" +log="${log:=$RESULT_DIR/stream_density.log}" + +if [ -n "$STREAM_DENSITY_FPS" ] +then + if (( $(echo "$STREAM_DENSITY_FPS" | awk '{if ($1 <= 0) print 1;}') )) + then + echo "ERROR: stream density input target fps should be greater than 0" >> "$log" + exit 1 + fi + TARGET_FPS=$STREAM_DENSITY_FPS +fi + +if [ -n "$STREAM_DENSITY_INCREMENTS" ] +then + if (( $(echo "$STREAM_DENSITY_INCREMENTS" | awk '{if ($1 <= 0) print 1;}') )) + then + echo "ERROR: stream density input increments should be greater than 0" >> "$log" + exit 1 + fi +fi + +if [ -n "$COMPLETE_INIT_DURATION" ] +then + INIT_DURATION=$COMPLETE_INIT_DURATION +fi + +echo "Stream density TARGET_FPS set for $TARGET_FPS and INIT_DURATION set for $INIT_DURATION" > "$log" +echo "Starting single container stream density benchmarking" >> "$log" + +GPU_DEVICE_TOGGLE="1" + +decrementing=0 +start_cid_count=0 +declare -a pipelinePIDs + while [ $MEETS_FPS = true ] do - echo "Starting RTSP stream" - ./camera-simulator.sh - sleep 10 - echo "Starting pipelines. 
Device: $DEVICE" - #docker-run needs to run in it's directory for the file paths to work - cd ../ - for i in $( seq 0 $(($num_pipelines))) + total_fps_per_stream=0.0 + total_fps=0.0 + + cid_count=$(( num_pipelines - 1 )) + + echo "Starting pipeline: $num_pipelines" >> "$log" + pipelineArgs=("${@:1}") + if [ -z "$AUTO_SCALE_FLEX_140" ] + then + echo "DEBUG: " "${pipelineArgs[@]}" >> "$log" + if [ $decrementing -eq 0 ] + then + for i in $( seq $(( start_cid_count )) $(( num_pipelines - 1 ))) + do + echo "the pipeline args is " "${pipelineArgs[@]}" + cid_count=$i + "${pipelineArgs[@]}" & + pid=$! + pipelinePIDs+=("$pid") + done + echo "pipeline pid list: " "${pipelinePIDs[@]}" + else + # kill the pipeline with index based on the current pipeline number + pidToKill="${pipelinePIDs[$num_pipelines]}" + cleanupPipelineProcesses "$pidToKill" + pgrep -fa "$1" + echo + echo "current background running pipeline PIDs: $(jobs -p)" + fi + else + echo "INFO: Auto scaling on both flex 140 gpus...targetting device $GPU_DEVICE_TOGGLE" >> "$log" + for i in $( seq $(( start_cid_count )) $(( num_pipelines - 1 ))) + do + if [ $decrementing -eq 0 ] + then + cid_count=$i + if [ "$GPU_DEVICE_TOGGLE" == "1" ] + then + GST_VAAPI_DRM_DEVICE=/dev/dri/renderD128 "${pipelineArgs[@]}" & + GPU_DEVICE_TOGGLE=2 + else + GST_VAAPI_DRM_DEVICE=/dev/dri/renderD129 "${pipelineArgs[@]}" & + GPU_DEVICE_TOGGLE=1 + fi + pid=$! + pipelinePIDs+=("$pid") + echo "pipeline pid list: " "${pipelinePIDs[@]}" + else + # kill the pipeline with index based on the current pipeline number + pidToKill="${pipelinePIDs[$num_pipelines]}" + cleanupPipelineProcesses "$pidToKill" + pgrep -fa "$1" + echo + echo "current background running pipeline PIDs: $(jobs -p)" + fi + done + fi + + echo "waiting for pipelines to settle" >> "$log" + sleep "$INIT_DURATION" + + # note: before reading the pipeline log files + # we want to give pipelines some time as the log files + # producing could be lagging behind... 
+ max_retries=50 + retry=0 + foundAllLogs=0 + while [ $foundAllLogs -ne $num_pipelines ] do - echo "pipeline $i" - ./docker-run.sh --platform $DEVICE --inputsrc $CAMERA_ID + if [ $retry -ge $max_retries ] + then + echo "ERROR: cannot find all pipeline log files after retries, pipeline may have been failed..." >> "$log" + exit 1 + fi + + echo "checking presence of all pipeline log files..." + + foundAllLogs=0 + for i in $( seq 0 $(( cid_count ))) + do + # to make sure all non-empty pipeline log files are present before proceed + if [ -f "$RESULT_DIR/pipeline$i.log" ] && [ -s "$RESULT_DIR/pipeline$i.log" ] + then + echo "found non-empty pipeline$i.log file" >> "$log" + foundAllLogs=$(( foundAllLogs + 1 )) + else + echo "could not find non-empty pipeline$i.log file yet, will retry it again" >> "$log" + fi + done + retry=$(( retry + 1 )) + sleep 1 + done + + for i in $( seq 0 $(( cid_count ))) + do + # Last 10/20 seconds worth of currentfps + # filter out nan value just in case there is such value produced from pipeline + STREAM_FPS_LIST=$(awk '!/na/' "$RESULT_DIR"/pipeline"$i".log | tail -20) + if [ -z "$STREAM_FPS_LIST" ] + then + # we already checked non-empty log contents above, this check is here just in case everything is NaN + echo "Warning: No FPS returned from pipeline$i.log" + fi + stream_fps_sum=0 + stream_fps_count=0 + + for stream_fps in $STREAM_FPS_LIST + do + stream_fps_sum=$(echo "$stream_fps_sum" "$stream_fps" | awk '{print $1 + $2}') + stream_fps_count=$(echo "$stream_fps_count" 1 | awk '{print $1 + $2}') + done + stream_fps_avg=$(echo "$stream_fps_sum" "$stream_fps_count" | awk '{print $1 / $2}') + + + total_fps=$(echo "$total_fps" "$stream_fps_avg" | awk '{print $1 + $2}') + total_fps_per_stream=$(echo "$total_fps" "$num_pipelines" | awk '{print $1 / $2}') + echo "FPS for pipeline$i: $stream_fps_avg" >> "$log" done - cd - || { echo "ERROR: failed to change back to the previous directory"; exit 1; } - echo "waiting for pipelines to settle" - #time to 
let the pipelines settle - sleep 120 - - fps=`tail -1 ../results/pipeline0.log` - echo "FPS for total number of pipeline $(($i + 1)): $fps" - if (( $(echo $fps $TARGET_FPS | awk '{if ($1 > $2) print 1;}') )) + echo "Total FPS throughput: $total_fps" >> "$log" + echo "Total FPS per stream: $total_fps_per_stream" >> "$log" + + echo "decrementing: $decrementing" + echo "current total fps per stream = $total_fps_per_stream for $num_pipelines pipeline(s)" + + if [ "$decrementing" -eq 0 ] then - echo "yes" + if (( $(echo "$total_fps_per_stream" "$TARGET_FPS" | awk '{if ($1 >= $2) print 1;}') )) + then + # if the increments hint from $STREAM_DENSITY_INCREMENTS is not empty + # we will use it as the increments + # otherwise, we will try to adjust increments dynamically based on the rate of $total_fps_per_stream + # and $TARGET_FPS + if [ -n "$STREAM_DENSITY_INCREMENTS" ] + then + # there is increments hint from the input, so we will honor it + # after the first pipeline, the stream density increments will be appiled if we are not there yet + increments=$STREAM_DENSITY_INCREMENTS + else + # when there is no increments hint from input, the value of increments is calculated + # by the rate of $total_fps_per_stream and $TARGET_FPS per greedy policy + increments=$(echo "$total_fps_per_stream" "$TARGET_FPS" | awk '{print int($1 / $2)}') + # when calculated increments is == 1 under this case, the internal maximum increments + # will be used as there is no effective way to figure out what's the best increments in this case + if [ "$increments" -eq 1 ] + then + increments=$MAX_GUESS_INCREMENTS + fi + fi + echo "incrementing by $increments" + else + increments=-1 + decrementing=1 + echo "Below target fps $TARGET_FPS, starting to decrement pipelines by 1..." 
+ fi else - echo "no" - MEETS_FPS=false - echo "Max number of pipelines: $(( $num_pipelines ))" + if (( $(echo "$total_fps_per_stream" "$TARGET_FPS" | awk '{if ($1 >= $2) print 1;}') )) + then + echo "found maximum number of pipelines to have target fps $TARGET_FPS" + MEETS_FPS=false + echo "Max stream density achieved for target FPS $TARGET_FPS is $num_pipelines" >> "$log" + echo "Finished stream density benchmarking" >> "$log" + else + if [ "$num_pipelines" -le 1 ] + then + echo "already reach num pipeline 1, and the fps per stream is $total_fps_per_stream but target FPS is $TARGET_FPS" >> "$log" + MEETS_FPS=false + break + else + echo "decrementing number of pipelines $num_pipelines by 1" + fi + fi fi - echo "Stopping server" - ./stop_server.sh - sleep 30 - num_pipelines=$(( $num_pipelines + 1 )) + start_cid_count=$(( num_pipelines )) + num_pipelines=$(( num_pipelines + increments )) + done #done while + +echo "stream_density done!" >> "$log" diff --git a/benchmark-scripts/test_benchmark_multiple_pipelines.sh b/benchmark-scripts/test_benchmark_multiple_pipelines.sh new file mode 100755 index 0000000..42dbd97 --- /dev/null +++ b/benchmark-scripts/test_benchmark_multiple_pipelines.sh @@ -0,0 +1,69 @@ +#!/bin/bash +# +# Copyright (C) 2023 Intel Corporation. 
+# +# SPDX-License-Identifier: Apache-2.0 +# + +checkNumOfPipelineLogFiles(){ + RESULT_DIR="../results" + expectedNumOfNonEmptyLogs=2 + numFoundLogs=0 + # check non-empty pipeline logs- should find 2 + for i in $( seq 0 $(( expectedNumOfNonEmptyLogs-1 ))) + do + if [ -f "$RESULT_DIR/pipeline$i.log" ] && [ -s "$RESULT_DIR/pipeline$i.log" ] + then + echo "found non-empty pipeline$i.log file" + numFoundLogs=$(( numFoundLogs + 1 )) + else + echo "could not find non-empty pipeline$i.log file" + fi + done + + if [ "$numFoundLogs" -ne "$expectedNumOfNonEmptyLogs" ] + then + echo "test for benchmarking multiple pipeline profile $PIPELINE_PROFILE FAILED: expect to have $expectedNumOfNonEmptyLogs pipelines but found $numFoundLogs" + else + echo "test for benchmarking multiple pipeline $PIPELINE_PROFILE PASSED: there are exactly $expectedNumOfNonEmptyLogs pipelines" + fi +} + +# inital setup +( + cd .. + make clean-all + sleep 3 + make build-python-apps + make build-capi_yolov5 +) + + ./download_sample_videos.sh + +# test for non-capi objec_detection +PIPELINE_PROFILE="object_detection" RENDER_MODE=0 sudo -E ./benchmark.sh --pipelines 2 --logdir test_object_detection/data --duration 30 --init_duration 10 --platform core --inputsrc rtsp://127.0.0.1:8554/camera_1 +sleep 2 +PIPELINE_PROFILE="object_detection"; checkNumOfPipelineLogFiles > testbenchmark.log + +#clean up +sudo rm -rf test_object_detection/ +( + cd .. + make clean-all +) + +# test for capi-yolov5 +PIPELINE_PROFILE="capi_yolov5" RENDER_MODE=0 sudo -E ./benchmark.sh --pipelines 2 --logdir test_capi_yolov5/data --duration 30 --init_duration 10 --platform core --inputsrc rtsp://127.0.0.1:8554/camera_1 +sleep 2 +PIPELINE_PROFILE="capi_yolov5"; checkNumOfPipelineLogFiles >> testbenchmark.log + +#clean up +sudo rm -rf test_capi_yolov5/ +( + cd .. 
+ make clean-all +) + +# show test results: +grep --color=never "test for benchmarking multiple pipeline" testbenchmark.log +rm testbenchmark.log diff --git a/benchmark-scripts/test_format_avc_mp4.sh b/benchmark-scripts/test_format_avc_mp4.sh index 4c14d01..ec7d0a0 100755 --- a/benchmark-scripts/test_format_avc_mp4.sh +++ b/benchmark-scripts/test_format_avc_mp4.sh @@ -9,7 +9,7 @@ # test setup: prepare test folder for download test file testFolder=../sample-media/test FILENAME_DOWNLOAD=test/test.mp4 -DEFAULT_FILE_PATH_NAME=../sample-media/test/test-3840-15-bench.mp4 +DEFAULT_FILE_PATH_NAME=../sample-media/test/test-1920-15-bench.mp4 FILE_URL_TO_DOWNLOAD=https://storage.openvinotoolkit.org/data/test_data/videos/smartlab/v3/stream_1_left.mp4 mkdir $testFolder @@ -26,39 +26,9 @@ cleanupTestFolderContent() { rm -f "$testFolder/*" } -# test case 1: test without image +# test case 1: test with image, got statusCode 0 and test media file downloaded (happy path) echo -echo "# test case 1: test without image" -docker image tag sco-soc:2.0 test-soc:2.0 -docker image tag sco-dgpu:2.0 test-dgpu:2.0 -docker rmi sco-soc:2.0 -docker rmi sco-dgpu:2.0 - -FIND_IMAGE_SCO=$(docker images --format "{{.Repository}}" | grep "sco-") - -output=$(./format_avc_mp4.sh $FILENAME_DOWNLOAD $FILE_URL_TO_DOWNLOAD) -statusCode=$? 
-if [ -z "$FIND_IMAGE_SCO" ] -then - if [ $statusCode == 1 ] - then - echo "test PASSED: test without image and got expected status code of 1" - else - echo "test FAILED: expecting status code 1, but got something else" - fi -else - echo "test FAILED: Image found" -fi -# rename back the images -docker image tag test-soc:2.0 sco-soc:2.0 -docker image tag test-dgpu:2.0 sco-dgpu:2.0 -docker rmi test-soc:2.0 -docker rmi test-dgpu:2.0 -cleanupTestFolderContent - -# test case 2: test with image, got statusCode 0 and test media file downloaded (happy path) -echo -echo "# test case 2: test with image, got statusCode 0 and test media file downloaded (happy path)" +echo "# test case 1: test with image, got statusCode 0 and test media file downloaded (happy path)" output=$(./format_avc_mp4.sh $FILENAME_DOWNLOAD $FILE_URL_TO_DOWNLOAD) statusCode=$? echo "$statusCode" @@ -75,10 +45,10 @@ else fi cleanupTestFolderContent -# test case 3: download 2nd time, expect message "Skipping..." +# test case 2: download 2nd time, expect message "Skipping..." SUB="Skipping..." echo -echo "# test case 3: download 2nd time, expect message \"$SUB\"" +echo "# test case 2: download 2nd time, expect message \"$SUB\"" output=$(./format_avc_mp4.sh $FILENAME_DOWNLOAD $FILE_URL_TO_DOWNLOAD) statusCode=$? 
if [ $statusCode == 0 ] @@ -107,9 +77,9 @@ fi cleanupTestFolderContent -# test case 4: input resize, expect file name with resize in the file name (happy path) +# test case 3: input resize, expect file name with resize in the file name (happy path) echo -echo "# test case 4: input resize, expect file name with resize in the file name (happy path)" +echo "# test case 3: input resize, expect file name with resize in the file name (happy path)" WIDTH=1080 HEIGHT=720 FPS=10 @@ -128,9 +98,9 @@ else fi cleanupTestFolderContent -# test case 5: input Width should be integer type +# test case 4: input Width should be integer type echo -echo "# test case 5: input Width should be integer type" +echo "# test case 4: input Width should be integer type" WIDTH=8abv HEIGHT=720 FPS=10 @@ -150,9 +120,9 @@ else fi cleanupTestFolderContent -# test case 6: input Height should be integer type +# test case 5: input Height should be integer type echo -echo "# test case 6: input Height should be integer type" +echo "# test case 5: input Height should be integer type" WIDTH=1035 HEIGHT=8.0fd FPS=10 @@ -172,9 +142,9 @@ else fi cleanupTestFolderContent -# test case 7: input FPS should be float or integer type +# test case 6: input FPS should be float or integer type echo -echo "# test case 7: input FPS should be float or integer type" +echo "# test case 6: input FPS should be float or integer type" WIDTH=1035 HEIGHT=335 FPS=a.09 @@ -196,7 +166,3 @@ cleanupTestFolderContent # clean up: remove the test folder cleanupTestFolder - - - - diff --git a/benchmark-scripts/test_streamdensity.sh b/benchmark-scripts/test_streamdensity.sh new file mode 100755 index 0000000..394dcbb --- /dev/null +++ b/benchmark-scripts/test_streamdensity.sh @@ -0,0 +1,72 @@ +#!/usr/bin/env bash +# +# Copyright (C) 2023 Intel Corporation. +# +# SPDX-License-Identifier: Apache-2.0 +# + +# initial setup +( + cd .. 
+    make clean-all
+    sleep 3
+)
+
+min_expected=2
+target_fps=14.9
+testDir=mytest1
+increment_hint=5
+
+echo "testcase: minimum ${min_expected} streams expected without increments hint"
+# testing for no increments hint
+PIPELINE_PROFILE="gst" RENDER_MODE=0 sudo -E ./benchmark.sh --stream_density $target_fps --logdir "$testDir" --duration 120 --init_duration 60 \
+    --platform core --inputsrc rtsp://127.0.0.1:8554/camera_1
+
+statusCode=$?
+if [ $statusCode -ne 0 ]
+then
+    echo "test failed: expecting get status code 0 but found $statusCode"
+else
+    # extract the achieved stream count <n> from the log line
+    # "Max stream density achieved for target FPS <fps> is <n>"
+    res=$(grep -i -Eo "Max stream density achieved for target FPS ([0-9]+(.[0-9]+)*) is ([0-9])+" ../results/stream_density.log | awk -F ' ' '{print $10}')
+
+    if [ -z "${res}" ]; then
+        echo "test failed: maximum pipeline numbers not found"
+    elif [ "${res}" -ge "${min_expected}" ]; then
+        echo "test passed: maximum pipeline number = ${res}"
+    else
+        echo "test failed: unable to reach the min. ${min_expected} streams as maximum pipeline number = ${res}"
+    fi
+fi
+
+sudo rm -rf "$testDir"
+
+sleep 10
+
+echo
+echo "testcase: minimum ${min_expected} streams expected with increments hint"
+# testing for core system with rtsp, you may need to edit the input source if rtsp is different for camera device
+PIPELINE_PROFILE="gst" RENDER_MODE=0 sudo -E ./benchmark.sh --stream_density $target_fps $increment_hint --logdir "$testDir" --duration 120 --init_duration 60 \
+    --platform core --inputsrc rtsp://127.0.0.1:8554/camera_1
+
+statusCode=$?
+if [ $statusCode -ne 0 ] +then + echo "test failed: expecting get status code 0 but found $statusCode" +else + # Max stream density achieved for target FPS 12 is at least 1 + res=$(grep -i -Eo "Max stream density achieved for target FPS ([0-9]+(.[0-9]+)*) is ([0-9])+" ../results/stream_density.log | awk -F ' ' '{print $10}') + + if [ -z "${res}" ]; then + echo "test failed: maximum pipeline numbers not found" + elif [ "${res}" -ge "${min_expected}" ]; then + echo "test passed: maximum pipeline number = ${res}" + else + echo "test failed: unable to reach the min. ${min_expected} streams as maximum pipeline number = ${res}" + fi +fi + +sudo rm -rf "$testDir" + +echo + diff --git a/docker-run-dlstreamer.sh b/docker-run-dlstreamer.sh deleted file mode 100755 index b80af37..0000000 --- a/docker-run-dlstreamer.sh +++ /dev/null @@ -1,159 +0,0 @@ -#!/bin/bash -# -# Copyright (C) 2023 Intel Corporation. -# -# SPDX-License-Identifier: Apache-2.0 -# - -# clean up exited containers -docker rm $(docker ps -a -f name=automated-self-checkout -f status=exited -q) - -export GST_DEBUG=0 - -source benchmark-scripts/get-gpu-info.sh - -if [ -z "$PLATFORM" ] || [ -z "$INPUTSRC" ] -then - source get-options.sh "$@" -fi - -cl_cache_dir=`pwd`/.cl-cache -echo "CLCACHE: $cl_cache_dir" - -#HAS_FLEX_140=$HAS_FLEX_140, HAS_FLEX_170=$HAS_FLEX_170, HAS_ARC=$HAS_ARC - -# TODO: override tag for other images and workloads -if [ $HAS_FLEX_140 == 1 ] || [ $HAS_FLEX_170 == 1 ] || [ $HAS_ARC == 1 ] -then - if [ $OCR_DISABLED == 0 ] - then - echo "OCR device defaulting to dGPU" - OCR_DEVICE=GPU - fi - if [ $PLATFORM == "dgpu" ] - then - echo "Arc/Flex device driver stack" - TAG=sco-dgpu:2.0 - else - TAG=sco-soc:2.0 - echo "SOC (CPU, iGPU, and Xeon SP) device driver stack" - fi - - if [ $HAS_ARC == 1 ]; then - PLATFORM="arc" - fi - -else - echo "SOC (CPU, iGPU, and Xeon SP) device driver stack" - TAG=sco-soc:2.0 -fi - -if [ ! 
-z "$CONTAINER_IMAGE_OVERRIDE" ] -then - echo "Using container image override $CONTAINER_IMAGE_OVERRIDE" - TAG=$CONTAINER_IMAGE_OVERRIDE -fi - -cids=$(docker ps --filter="name=automated-self-checkout" -q -a) -cid_count=`echo "$cids" | wc -w` -CONTAINER_NAME="automated-self-checkout"$(($cid_count)) -LOG_FILE_NAME="automated-self-checkout"$(($cid_count))".log" - -#echo "barcode_disabled: $BARCODE_DISABLED, barcode_interval: $BARCODE_INTERVAL, ocr_interval: $OCR_INTERVAL, ocr_device: $OCR_DEVICE, ocr_disabled=$OCR_DISABLED, class_disabled=$CLASSIFICATION_DIABLED" -pre_process="" -if grep -q "rtsp" <<< "$INPUTSRC"; then - # rtsp - # todo pass depay info - inputsrc=$INPUTSRC" ! rtph264depay " - INPUTSRC_TYPE="RTSP" - pre_process="pre-process-backend=vaapi-surface-sharing -e pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1" - - -elif grep -q "file" <<< "$INPUTSRC"; then - # filesrc - arrfilesrc=(${INPUTSRC//:/ }) - # use vids since container maps a volume to this location based on sample-media folder - # TODO: need to pass demux/codec info - inputsrc="filesrc location=vids/"${arrfilesrc[1]}" ! qtdemux ! h264parse " - INPUTSRC_TYPE="FILE" - decode_type="vaapidecodebin" - pre_process="pre-process-backend=vaapi-surface-sharing -e pre-process-config=VAAPI_FAST_SCALE_LOAD_FACTOR=1" - -elif grep -q "video" <<< "$INPUTSRC"; then - # v4l2src /dev/video* - # TODO need to pass stream info - inputsrc="v4l2src device="$INPUTSRC - INPUTSRC_TYPE="USB" - TARGET_USB_DEVICE="--device=$INPUTSRC" - decode_type="videoconvert ! 
video/x-raw,format=BGR" - pre_process="" - -else - # rs-serial realsenssrc - # TODO need to pass depthalign info - inputsrc="realsensesrc cam-serial-number="$INPUTSRC" stream-type=0 align=0 imu_on=false" - # add realsense color related properties if any - if [ "$COLOR_WIDTH" != 0 ]; then - inputsrc=$inputsrc" color-width="$COLOR_WIDTH - fi - if [ "$COLOR_HEIGHT" != 0 ]; then - inputsrc=$inputsrc" color-height="$COLOR_HEIGHT - fi - if [ "$COLOR_FRAMERATE" != 0 ]; then - inputsrc=$inputsrc" color-framerate="$COLOR_FRAMERATE - fi - INPUTSRC_TYPE="REALSENSE" - decode_type="decodebin ! videoconvert ! video/x-raw,format=BGR" - pre_process="" - cameras=`ls /dev/vid* | while read line; do echo "--device=$line"; done` - TARGET_GPU_DEVICE=$TARGET_GPU_DEVICE" "$cameras -fi - -if [ "${OCR_DISABLED}" == "0" ] && [ "${BARCODE_DISABLED}" == "0" ] && [ "${CLASSIFICATION_DISABLED}" == "0" ] && [ "${REALSENSE_ENABLED}" == "0" ]; then - pipeline="yolov5s_full.sh" - -elif [ "${OCR_DISABLED}" == "1" ] && [ "${BARCODE_DISABLED}" == "1" ] && [ "${CLASSIFICATION_DISABLED}" == "1" ]; then - pipeline="yolov5s.sh" -elif [ "${OCR_DISABLED}" == "1" ] && [ "${BARCODE_DISABLED}" == "1" ] && [ "${CLASSIFICATION_DISABLED}" == "0" ]; then - pipeline="yolov5s_effnetb0.sh" -elif [ "${REALSENSE_ENABLED}" == "1" ]; then - # TODO: this will not work for diff pipelines like _full and _effnetb0 etc - pipeline="yolov5s_realsense.sh" - -else - echo "Not implemented" - exit 0 -fi - -# Set RENDER_MODE=1 for demo purposes only -RUN_MODE="-itd" -if [ "$RENDER_MODE" == 1 ] -then - RUN_MODE="-it" -fi - -bash_cmd="/home/pipeline-server/framework-pipelines/$PLATFORM/$pipeline" -if [ "$STREAM_DENSITY_MODE" == 1 ]; then - echo "Starting Stream Density" - bash_cmd="./stream_density_framework-pipelines.sh framework-pipelines/$PLATFORM/$pipeline" - stream_density_mount="-v `pwd`/configs/dlstreamer/framework-pipelines/stream_density.sh:/home/pipeline-server/stream_density_framework-pipelines.sh" - 
stream_density_params="-e STREAM_DENSITY_FPS=$STREAM_DENSITY_FPS -e STREAM_DENSITY_INCREMENTS=$STREAM_DENSITY_INCREMENTS -e COMPLETE_INIT_DURATION=$COMPLETE_INIT_DURATION" - echo "DEBUG: $stream_density_params" -fi - -#echo "DEBUG: $TARGET_GPU_DEVICE $PLATFORM $HAS_FLEX_140" -if [ "$TARGET_GPU_DEVICE" == "--privileged" ] && [ "$PLATFORM" == "dgpu" ] && [ $HAS_FLEX_140 == 1 ] -then - if [ "$STREAM_DENSITY_MODE" == 1 ]; then - # override logic in workload script so stream density can manage it - AUTO_SCALE_FLEX_140=2 - else - # allow workload to manage autoscaling - AUTO_SCALE_FLEX_140=1 - fi -fi - -# make sure models are downloaded or existing: -./download_models/getModels.sh --workload dlstreamer - -docker run --network host $cameras $TARGET_USB_DEVICE $TARGET_GPU_DEVICE --user root --ipc=host --name automated-self-checkout$cid_count -e RENDER_MODE=$RENDER_MODE $stream_density_mount -e INPUTSRC_TYPE=$INPUTSRC_TYPE -e DISPLAY=$DISPLAY -e cl_cache_dir=/home/pipeline-server/.cl-cache -v $cl_cache_dir:/home/pipeline-server/.cl-cache -v /tmp/.X11-unix:/tmp/.X11-unix -v `pwd`/sample-media/:/home/pipeline-server/vids -v `pwd`/configs/dlstreamer/pipelines:/home/pipeline-server/pipelines -v `pwd`/configs/dlstreamer/extensions:/home/pipeline-server/extensions -v `pwd`/results:/tmp/results -v `pwd`/configs/dlstreamer/models/2022:/home/pipeline-server/models -v `pwd`/configs/dlstreamer/framework-pipelines:/home/pipeline-server/framework-pipelines -w /home/pipeline-server -e BARCODE_RECLASSIFY_INTERVAL=$BARCODE_INTERVAL -e OCR_RECLASSIFY_INTERVAL=$OCR_INTERVAL -e OCR_DEVICE=$OCR_DEVICE -e LOG_LEVEL=$LOG_LEVEL -e GST_DEBUG=$GST_DEBUG -e decode_type="$decode_type" -e pre_process="$pre_process" -e LOW_POWER="$LOW_POWER" -e cid_count=$cid_count -e inputsrc="$inputsrc" $RUN_MODE $stream_density_params -e CPU_ONLY="$CPU_ONLY" -e AUTO_SCALE_FLEX_140="$AUTO_SCALE_FLEX_140" $TAG bash -c "bash $bash_cmd" diff --git a/docker-run.sh b/docker-run.sh deleted file mode 100755 index 
bf1a328..0000000 --- a/docker-run.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -# -# Copyright (C) 2023 Intel Corporation. -# -# SPDX-License-Identifier: Apache-2.0 -# - -source benchmark-scripts/get-gpu-info.sh -# WORKLOAD_SCRIPT is env varilable will be overwritten by --workload input option -WORKLOAD_SCRIPT="docker-run-dlstreamer.sh" - -if [ -z "$PLATFORM" ] || [ -z "$INPUTSRC" ] -then - source get-options.sh "$@" -fi - -echo "running $WORKLOAD_SCRIPT" -./$WORKLOAD_SCRIPT "$@"