Skip to content

Commit 7712b78

Browse files
authored
Merge branch 'main' into gst_to_1_26_10
2 parents 1489e6b + 4b01570 commit 7712b78

File tree

14 files changed

+267
-142
lines changed

14 files changed

+267
-142
lines changed

.github/workflows/dls-pr-workflow.yaml

Lines changed: 10 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -120,14 +120,13 @@ jobs:
120120
DLS_COVERITY_EMAIL: ${{ secrets.DLS_COVERITY_EMAIL }}
121121
DLS_COVERITY_PROJECT: ${{ secrets.DLS_COVERITY_PROJECT }}
122122

123-
dls-filter-docker-or-optimizer-related-changes:
123+
dls-filter-docker-related-changes:
124124
permissions:
125125
contents: read
126-
name: "DLS SCAN: detect changes in docker and optimizer dir"
126+
name: "DLS SCAN: detect changes in docker dir"
127127
runs-on: dlstreamer
128128
outputs:
129129
docker_changed: ${{ steps.check.outputs.docker_changed }}
130-
optimizer_changed: ${{ steps.check.outputs.optimizer_changed }}
131130
steps:
132131
- name: Check out dlstreamer repository
133132
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd #6.0.2
@@ -141,33 +140,22 @@ jobs:
141140
cd dlstreamer-repo
142141
git fetch origin main
143142
144-
- name: Detect changes in docker or optimizer directory
143+
- name: Detect changes in docker directory
145144
id: check
146145
run: |
147-
echo "🔍 Checking for changes in 'docker/' and 'scripts/optimizer'..."
146+
echo "🔍 Checking for changes in 'docker/' dir"
148147
cd dlstreamer-repo
149148
CHANGED_FILES_DOCKER=$(git diff --name-only origin/main...HEAD -- 'docker/')
150-
CHANGED_FILES_OPTIMIZER=$(git diff --name-only origin/main...HEAD -- 'scripts/optimizer')
151149
152150
echo "docker_changed=false" >> "$GITHUB_OUTPUT"
153-
echo "optimizer_changed=false" >> "$GITHUB_OUTPUT"
154151
155152
if [ -n "${CHANGED_FILES_DOCKER}" ]; then
156153
echo "📄 Changed Docker-related files:"
157154
echo "${CHANGED_FILES_DOCKER}"
158155
echo "docker_changed=true" >> "$GITHUB_OUTPUT"
159156
echo "🟡 Docker-related changes detected."
160-
fi
161-
162-
if [ -n "${CHANGED_FILES_OPTIMIZER}" ]; then
163-
echo "📄 Changed Optimizer-related files:"
164-
echo "${CHANGED_FILES_OPTIMIZER}"
165-
echo "optimizer_changed=true" >> "$GITHUB_OUTPUT"
166-
echo "🟡 Optimizer-related changes detected."
167-
fi
168-
169-
if [ -z "${CHANGED_FILES_DOCKER}" ] && [ -z "${CHANGED_FILES_OPTIMIZER}" ]; then
170-
echo "✅ No docker or optimizer related changes."
157+
else
158+
echo "✅ No docker related changes."
171159
fi
172160
173161
- name: Clean up
@@ -177,8 +165,8 @@ jobs:
177165
dls-trivy-config-scan:
178166
permissions:
179167
contents: read
180-
needs: [dls-filter-docker-or-optimizer-related-changes]
181-
if: needs.dls-filter-docker-or-optimizer-related-changes.outputs.docker_changed == 'true'
168+
needs: [dls-filter-docker-related-changes]
169+
if: needs.dls-filter-docker-related-changes.outputs.docker_changed == 'true'
182170
name: "DLS SCAN: Trivy ${{ matrix.name }}"
183171
strategy:
184172
fail-fast: false
@@ -208,8 +196,8 @@ jobs:
208196
permissions:
209197
contents: read
210198
pull-requests: write
211-
needs: [dls-filter-docker-or-optimizer-related-changes]
212-
if: needs.dls-filter-docker-or-optimizer-related-changes.outputs.docker_changed == 'true'
199+
needs: [dls-filter-docker-related-changes]
200+
if: needs.dls-filter-docker-related-changes.outputs.docker_changed == 'true'
213201
name: "DLS SCAN: Hadolint"
214202
runs-on: dlstreamer
215203
strategy:
@@ -405,8 +393,6 @@ jobs:
405393
permissions:
406394
contents: read
407395
packages: read
408-
needs: [dls-filter-docker-or-optimizer-related-changes]
409-
if: needs.dls-filter-docker-or-optimizer-related-changes.outputs.optimizer_changed == 'true'
410396
name: "DLS TEST: Optimizer"
411397
uses: ./.github/workflows/dls-test-optimizer.yaml
412398
with:

.github/workflows/dls-test-optimizer.yaml

Lines changed: 32 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -144,14 +144,15 @@ jobs:
144144
-w /workspace \
145145
${deb_final_img} \
146146
bash /workspace/optimizer_tests/scripts/run_optimizer_tests.sh --config-file "/workspace/optimizer_tests/test_config.json"
147+
147148
- name: Upload Docker test results
148149
if: always()
149150
uses: actions/upload-artifact@bbbca2ddaa5d8feaa63e36b76fdaad77386f024f #7.0.0
150151
with:
151152
name: DLS_optimizer_Docker_${{ matrix.runner_print_label }}_${{ matrix.ubuntu_version }}_results
152153
path: ${{ env.OPTIMIZER_TESTS_PATH }}/optimizer_results/docker/FINAL_TEST_REPORT.txt
153154

154-
# ======================================================== OH HOST TESTING PART ========================================================
155+
# ======================================================== ON HOST TESTING PART ========================================================
155156
- name: Link DL Streamer to home directory
156157
if: always()
157158
run: |
@@ -160,7 +161,9 @@ jobs:
160161
if: always()
161162
run: |
162163
$DLS_REL_PATH/tests/scripts/installation-on-host-entrypoint.sh $DLS_REL_PATH/deb_packages
164+
163165
- name: Run Optimizer on host
166+
id: run_optimizer_on_host
164167
if: always()
165168
run: |
166169
mkdir $OPTIMIZER_TESTS_PATH/optimizer_results/host
@@ -172,6 +175,34 @@ jobs:
172175
with:
173176
name: DLS_optimizer_host_${{ matrix.runner_print_label }}_${{ matrix.ubuntu_version }}_results
174177
path: ${{ env.OPTIMIZER_TESTS_PATH }}/optimizer_results/host/FINAL_TEST_REPORT.txt
178+
179+
# ======================================================== TEST RESULTS SUMMARY ========================================================
180+
- name: Add optimizer test results to summary
181+
env:
182+
DOCKER_OUTCOME: ${{ steps.run_optimizer_in_docker.outcome }}
183+
HOST_OUTCOME: ${{ steps.run_optimizer_on_host.outcome }}
184+
if: always()
185+
run: |
186+
echo "" >> $GITHUB_STEP_SUMMARY
187+
echo "## 🧪 Optimizer Test Results" >> $GITHUB_STEP_SUMMARY
188+
echo "" >> $GITHUB_STEP_SUMMARY
189+
echo "| Test Environment | Status |" >> $GITHUB_STEP_SUMMARY
190+
echo "|------------------|--------|" >> $GITHUB_STEP_SUMMARY
191+
192+
# Docker test status
193+
if [ "$DOCKER_OUTCOME" == "success" ]; then
194+
echo "| Optimizer in Docker | ✅ **PASSED** |" >> $GITHUB_STEP_SUMMARY
195+
else
196+
echo "| Optimizer in Docker | ❌ **FAILED** |" >> $GITHUB_STEP_SUMMARY
197+
fi
198+
199+
# Host test status
200+
if [ "$HOST_OUTCOME" == "success" ]; then
201+
echo "| Optimizer on Host | ✅ **PASSED** |" >> $GITHUB_STEP_SUMMARY
202+
else
203+
echo "| Optimizer on Host | ❌ **FAILED** |" >> $GITHUB_STEP_SUMMARY
204+
fi
205+
175206
- name: Uninstall dlstreamer
176207
if: always ()
177208
run: |

SPECS/download_sources.sh

Lines changed: 5 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -73,16 +73,14 @@ main() {
7373
done
7474

7575
# Download the DL Streamer src code
76-
cd ../../..
77-
git submodule update --init libraries/dl-streamer/thirdparty/spdlog
78-
cd libraries
76+
REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
77+
cd "$REPO_ROOT"
78+
git submodule update --init thirdparty/spdlog
7979
rm -rf ~/intel-dlstreamer-${DLSTREAMER_VERSION}*
80-
cp -r dl-streamer ~
81-
mv ~/dl-streamer ~/intel-dlstreamer-${DLSTREAMER_VERSION}
80+
cp -r "$REPO_ROOT" ~/intel-dlstreamer-${DLSTREAMER_VERSION}
8281
cd ~
8382
tar czf intel-dlstreamer-${DLSTREAMER_VERSION}.tar.gz intel-dlstreamer-${DLSTREAMER_VERSION}
84-
cd -
85-
mv ~/intel-dlstreamer-${DLSTREAMER_VERSION}.tar.gz dl-streamer/SPECS/
83+
mv ~/intel-dlstreamer-${DLSTREAMER_VERSION}.tar.gz "$SCRIPT_DIR/"
8684
log_info ""
8785
}
8886

SPECS/intel-dlstreamer/intel-dlstreamer.spec

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
%define debug_package %{nil}
22
Name: intel-dlstreamer
3-
Version: 2025.2.0
3+
Version: 2026.0.0
44
Release: 1%{?dist}
55
Summary: Intel Deep Learning Streamer framework
66

@@ -20,14 +20,14 @@ BuildRequires: libva-devel libva-intel-media-driver
2020
BuildRequires: python3-devel python3-pip
2121
BuildRequires: pkgconfig patchelf
2222
BuildRequires: opencv-devel >= 4.12.0
23-
BuildRequires: gstreamer-devel >= 1.26.1
23+
BuildRequires: gstreamer-devel >= 1.26.6
2424
BuildRequires: paho-mqtt-c-devel >= 1.3.4
2525
BuildRequires: librdkafka-devel
2626

2727
# Runtime dependencies
2828
Requires: paho-mqtt-c-devel >= 1.3.4
2929
Requires: ffmpeg >= 6.1.1
30-
Requires: gstreamer >= 1.26.1
30+
Requires: gstreamer >= 1.26.6
3131
Requires: opencv >= 4.12.0
3232
Requires: libva2 libva-intel-media-driver
3333
Requires: python3 python3-pip python3-gobject
@@ -139,6 +139,8 @@ rm -rf %{buildroot}
139139
/opt/intel/dlstreamer/lib/pkgconfig/*
140140

141141
%changelog
142+
* Wed Mar 25 2026 DL Streamer Team <dlstreamer@intel.com> - 2026.0.0-1
143+
- Update DL Streamer version
142144
* Wed Dec 02 2025 DL Streamer Team <dlstreamer@intel.com> - 2025.2.0-1
143145
- Update DL Streamer version
144146
* Thu Aug 07 2025 DL Streamer Team <dlstreamer@intel.com> - 2025.1.2-1

docs/user-guide/dev_guide/advanced_install/advanced_install_guide_compilation.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -150,7 +150,7 @@ packages:
150150

151151
### (Optional) Step 6: Install OpenVINO™ GenAI (only for Ubuntu)
152152

153-
To use [gvagenai element](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/elements/gvagenai.html)
153+
To use [gvagenai element](https://docs.openedgeplatform.intel.com/dev/edge-ai-libraries/dlstreamer/elements/gvagenai.html)
154154
you need to install the [OpenVINO GenAI archive](https://docs.openvino.ai/2026/get-started/install-openvino/install-openvino-genai.html) package.
155155

156156
<!--hide_directive::::{tab-set}

docs/user-guide/dev_guide/lvms.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
This article explains how to prepare models based on the [Hugging Face](https://huggingface.co/welcome) [`transformers`](https://github.com/huggingface/transformers) library for integration with the Deep Learning Streamer pipeline.
44

5-
Many transformer-based models can be converted to OpenVINO™ IR format using [optimum-cli](https://huggingface.co/docs/optimum-intel/en/openvino/export). DL Streamer supports selected Hugging Face architectures for tasks such as image classification, object detection, audio transcription, and more. See the [Supported Models](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/supported_models.html) table for details.
5+
Many transformer-based models can be converted to OpenVINO™ IR format using [optimum-cli](https://huggingface.co/docs/optimum-intel/en/openvino/export). DL Streamer supports selected Hugging Face architectures for tasks such as image classification, object detection, audio transcription, and more. See the [Supported Models](https://docs.openedgeplatform.intel.com/dev/edge-ai-libraries/dlstreamer/supported_models.html) table for details.
66

77
> **NOTE:** The instructions below are comprehensive, but for convenience, we recommend using the
88
> [download_hf_models.py](https://github.com/open-edge-platform/dlstreamer/blob/main/scripts/download_models/download_hf_models.py)
@@ -12,7 +12,7 @@ Many transformer-based models can be converted to OpenVINO™ IR format using [o
1212

1313
## Optimum-Intel Supported Models
1414

15-
The list available [here](https://huggingface.co/docs/optimum-intel/en/openvino/models) includes models that can be converted to IR format with a single `optimum-cli` command. If a model architecture is [supported by DL Streamer](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/supported_models.html#supported-architectures), it can typically be prepared as follows:
15+
The list available [here](https://huggingface.co/docs/optimum-intel/en/openvino/models) includes models that can be converted to IR format with a single `optimum-cli` command. If a model architecture is [supported by DL Streamer](https://docs.openedgeplatform.intel.com/dev/edge-ai-libraries/dlstreamer/supported_models.html#supported-architectures), it can typically be prepared as follows:
1616

1717
```bash
1818
optimum-cli export openvino --model provider_id/model_id --weight-format=int8 output_path

docs/user-guide/dev_guide/yolo_models.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ integration with the Deep Learning Streamer pipeline.
55

66
## Ultralytics Model Preparation
77

8-
All models supported by the [ultralytics/ultralytics](https://github.com/ultralytics/ultralytics) library can be converted to OpenVINO™ IR format by using the [Ultralytics exporter](https://docs.ultralytics.com/integrations/openvino/). DL Streamer supports many Ultralytics YOLO architectures for tasks such as zero-shot object detection, oriented object detection, segmentation, pose estimation, and more. See the [Supported Models](https://docs.openedgeplatform.intel.com/2026.0/edge-ai-libraries/dlstreamer/supported_models.html) table for details.
8+
All models supported by the [ultralytics/ultralytics](https://github.com/ultralytics/ultralytics) library can be converted to OpenVINO™ IR format by using the [Ultralytics exporter](https://docs.ultralytics.com/integrations/openvino/). DL Streamer supports many Ultralytics YOLO architectures for tasks such as zero-shot object detection, oriented object detection, segmentation, pose estimation, and more. See the [Supported Models](https://docs.openedgeplatform.intel.com/dev/edge-ai-libraries/dlstreamer/supported_models.html) table for details.
99

1010
> **NOTE:** The instructions below are comprehensive, but for convenience, we recommend using the
1111
> [download_ultralytics_models.py](https://github.com/open-edge-platform/dlstreamer/blob/main/scripts/download_models/download_ultralytics_models.py)

docs/user-guide/index.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@ or at the Edge. DL Streamer consists of:
2121
for designing, creating, building, and running media analytics
2222
pipelines. It includes C++ and Python APIs.
2323
- [Deep Learning Streamer Pipeline
24-
Server](https://github.com/open-edge-platform/edge-ai-libraries/tree/release-2026.0.0/microservices/dlstreamer-pipeline-server)
24+
Server](https://github.com/open-edge-platform/edge-ai-libraries/tree/main/microservices/dlstreamer-pipeline-server)
2525
for deploying and scaling media analytics pipelines as
2626
micro-services on one or many compute nodes. It includes REST APIs
2727
for pipelines management.
@@ -92,7 +92,7 @@ FasterRCNN, and other models.
9292
reference apps for the most common media analytics use cases. They are
9393
included in
9494
[Deep Learning Streamer Pipeline Framework](https://github.com/open-edge-platform/dlstreamer/tree/main),
95-
[Deep Learning Streamer Pipeline Server](https://github.com/open-edge-platform/edge-ai-libraries/tree/release-2026.0.0/microservices/dlstreamer-pipeline-server),
95+
[Deep Learning Streamer Pipeline Server](https://github.com/open-edge-platform/edge-ai-libraries/tree/main/microservices/dlstreamer-pipeline-server),
9696
[Open Visual Cloud](https://github.com/OpenVisualCloud), and
9797
[Intel® Edge Software Hub](https://www.intel.com/content/www/us/en/edge-computing/edge-software-hub.html)
9898
The samples demonstrate C++ and/or Python based: Action Recognition, Face Detection and

0 commit comments

Comments (0)