Merge branch 'mr/camera_example' into 'main'

Add camera example

See merge request app-frameworks/esp-matter!1330
This commit is contained in:
Hrishikesh Dhayagude
2026-01-08 23:44:59 +08:00
32 changed files with 5855 additions and 3 deletions
+20 -1
View File
@@ -28,7 +28,7 @@ variables:
IDF_CHECKOUT_REF: "v5.4.1" IDF_CHECKOUT_REF: "v5.4.1"
# This variable represents the short hash of the connectedhomeip submodule. # This variable represents the short hash of the connectedhomeip submodule.
# Note: Do change this short hash on submodule update MRs. # Note: Do change this short hash on submodule update MRs.
CHIP_SHORT_HASH: "faf4d09ad1" CHIP_SHORT_HASH: "71d5170272"
DOCKER_IMAGE_NAME: "espressif/chip-idf" DOCKER_IMAGE_NAME: "espressif/chip-idf"
.add_gitlab_ssh_key: &add_gitlab_ssh_key | .add_gitlab_ssh_key: &add_gitlab_ssh_key |
@@ -93,6 +93,11 @@ variables:
- git clone --depth 1 --recurse-submodules --shallow-submodules https://github.com/espressif/esp-rainmaker.git - git clone --depth 1 --recurse-submodules --shallow-submodules https://github.com/espressif/esp-rainmaker.git
- cd ${ESP_RMAKER_PATH}/examples/matter/ - cd ${ESP_RMAKER_PATH}/examples/matter/
.setup_kvs_sdk: &setup_kvs_sdk
- cd ${CI_PROJECT_DIR}
- git clone -b beta-reference-esp-port --single-branch --depth 1 --recurse-submodules https://github.com/awslabs/amazon-kinesis-video-streams-webrtc-sdk-c.git
- export KVS_SDK_PATH=${CI_PROJECT_DIR}/amazon-kinesis-video-streams-webrtc-sdk-c
.build_matter_examples: &build_matter_examples .build_matter_examples: &build_matter_examples
- export MATTER_EXAMPLES_PATH=$ESP_MATTER_PATH/connectedhomeip/connectedhomeip/examples - export MATTER_EXAMPLES_PATH=$ESP_MATTER_PATH/connectedhomeip/connectedhomeip/examples
- cd $ESP_MATTER_PATH/connectedhomeip/connectedhomeip - cd $ESP_MATTER_PATH/connectedhomeip/connectedhomeip
@@ -611,6 +616,20 @@ build_managed_component_light:
- idf.py set-target esp32c3 - idf.py set-target esp32c3
- idf.py build - idf.py build
build_esp_matter_camera_example:
extends:
- .build_examples_template
before_script:
- *setup_idf
- *setup_matter
- *setup_kvs_sdk
variables:
KVS_SDK_PATH: ${CI_PROJECT_DIR}/amazon-kinesis-video-streams-webrtc-sdk-c
script:
- cd ${ESP_MATTER_PATH}/examples/camera
- idf.py set-target esp32c6
- idf.py build
# This job is allowed to fail due to a circular dependency with esp-rainmaker. # This job is allowed to fail due to a circular dependency with esp-rainmaker.
build_esp_rainmaker_apps: build_esp_rainmaker_apps:
stage: build stage: build
+1 -1
View File
@@ -30,7 +30,7 @@ section in the ESP-Matter Programming Guide.
## Supported ESP-IDF and connectedhomeip versions ## Supported ESP-IDF and connectedhomeip versions
- This SDK currently works with commit [faf4d09ad1] (https://github.com/project-chip/connectedhomeip/tree/faf4d09ad1) of connectedhomeip. - This SDK currently works with commit [71d5170272] (https://github.com/project-chip/connectedhomeip/tree/71d5170272) of connectedhomeip.
- For Matter projects development with this SDK, it is recommended to utilize ESP-IDF [v5.4.1](https://github.com/espressif/esp-idf/tree/v5.4.1). - For Matter projects development with this SDK, it is recommended to utilize ESP-IDF [v5.4.1](https://github.com/espressif/esp-idf/tree/v5.4.1).
- For ESP32C5 and ESP32C61, it is recommended to utilize ESP-IDF [v5.5.1](https://github.com/espressif/esp-idf/tree/v5.5.1). - For ESP32C5 and ESP32C61, it is recommended to utilize ESP-IDF [v5.5.1](https://github.com/espressif/esp-idf/tree/v5.5.1).
+5
View File
@@ -140,3 +140,8 @@ examples/bridge_apps/esp_rainmaker_bridge:
- if: IDF_TARGET in ["esp32s3"] - if: IDF_TARGET in ["esp32s3"]
temporary: true temporary: true
reason: the other targets are not tested yet reason: the other targets are not tested yet
examples/camera:
enable:
- if: IDF_TARGET in [""]
temporary: true
reason: Another CI has been added
+59
View File
@@ -0,0 +1,59 @@
# The following lines of boilerplate have to be in your project's
# CMakeLists in this exact order for cmake to work correctly
# The following lines of boilerplate have to be in your project's
# CMakeLists in this exact order for cmake to work correctly
cmake_minimum_required(VERSION 3.16)

# The esp-matter SDK location must come from the environment.
if(NOT DEFINED ENV{ESP_MATTER_PATH})
    message(FATAL_ERROR "Please set ESP_MATTER_PATH to the path of esp-matter repo")
endif()

# Select a default device HAL for the active IDF_TARGET unless the caller
# already pointed ESP_MATTER_DEVICE_PATH at a specific board.
if(NOT DEFINED ENV{ESP_MATTER_DEVICE_PATH})
    if("${IDF_TARGET}" STREQUAL "esp32" OR "${IDF_TARGET}" STREQUAL "")
        set(ENV{ESP_MATTER_DEVICE_PATH} $ENV{ESP_MATTER_PATH}/device_hal/device/esp32_devkit_c)
    elseif("${IDF_TARGET}" STREQUAL "esp32c3")
        set(ENV{ESP_MATTER_DEVICE_PATH} $ENV{ESP_MATTER_PATH}/device_hal/device/esp32c3_devkit_m)
    elseif("${IDF_TARGET}" STREQUAL "esp32c2")
        set(ENV{ESP_MATTER_DEVICE_PATH} $ENV{ESP_MATTER_PATH}/device_hal/device/esp32c2_devkit_m)
    elseif("${IDF_TARGET}" STREQUAL "esp32h2")
        set(ENV{ESP_MATTER_DEVICE_PATH} $ENV{ESP_MATTER_PATH}/device_hal/device/esp32h2_devkit_c)
    elseif("${IDF_TARGET}" STREQUAL "esp32s3")
        set(ENV{ESP_MATTER_DEVICE_PATH} $ENV{ESP_MATTER_PATH}/device_hal/device/esp32s3_devkit_c)
    elseif("${IDF_TARGET}" STREQUAL "esp32c6")
        set(ENV{ESP_MATTER_DEVICE_PATH} $ENV{ESP_MATTER_PATH}/device_hal/device/esp32c6_devkit_c)
    elseif("${IDF_TARGET}" STREQUAL "esp32c5")
        set(ENV{ESP_MATTER_DEVICE_PATH} $ENV{ESP_MATTER_PATH}/device_hal/device/esp32c5_devkit_c)
    elseif("${IDF_TARGET}" STREQUAL "esp32p4")
        set(ENV{ESP_MATTER_DEVICE_PATH} $ENV{ESP_MATTER_PATH}/device_hal/device/hollow)
    else()
        message(FATAL_ERROR "Unsupported IDF_TARGET")
    endif()
endif()

set(PROJECT_VER "1.0")
set(PROJECT_VER_NUMBER 1)
set(ESP_MATTER_PATH $ENV{ESP_MATTER_PATH})
set(MATTER_SDK_PATH ${ESP_MATTER_PATH}/connectedhomeip/connectedhomeip)

# Build-mode switches for the split signaling/streaming architecture.
# Fix: the original used set(VAR "help text" ON/OFF), which stores the list
# "help text;ON" in a plain variable rather than declaring a described
# boolean; option() is the intended command for that.
option(ENABLE_SIGNALLING_ONLY "enable only the signalling" ON)
option(ENABLE_STREAMING_ONLY "enable only the streaming" OFF)

# This should be done before using the IDF_TARGET variable.
include($ENV{IDF_PATH}/tools/cmake/project.cmake)
include(${ESP_MATTER_PATH}/examples/common/cmake_common/components_include.cmake)
include($ENV{ESP_MATTER_DEVICE_PATH}/esp_matter_device.cmake)

set(EXTRA_COMPONENT_DIRS
    "${ESP_MATTER_PATH}/examples/common"
    "${MATTER_SDK_PATH}/config/esp32/components"
    "${ESP_MATTER_PATH}/components"
    "${ESP_MATTER_PATH}/device_hal/device"
    ${extra_components_dirs_append})

project(camera)

# Matter requires C++17 and its generated config header; keep binaries small (-Os).
idf_build_set_property(CXX_COMPILE_OPTIONS "-std=gnu++17;-Os;-DCHIP_HAVE_CONFIG_H;-Wno-overloaded-virtual" APPEND)
idf_build_set_property(C_COMPILE_OPTIONS "-Os" APPEND)
# For RISCV chips, project_include.cmake sets -Wno-format, but does not clear various
# flags that depend on -Wformat
idf_build_set_property(COMPILE_OPTIONS "-Wno-format-nonliteral;-Wno-format-security" APPEND)
+126
View File
@@ -0,0 +1,126 @@
# Matter Camera
This example demonstrates Matter camera using a two-chip split architecture,
where signaling and media streaming are separated across two processors
for optimal power efficiency.
## Architecture Overview
The split mode consists of two separate firmware images:
### 1. **matter_camera** (ESP32-C6)
- **Role**: Matter camera with WebRTC signaling integration
- **Responsibilities**:
- Matter stack execution
- WebRTC signaling
- Bridge communication with media_adapter
- Always-on connectivity for instant responsiveness
### 2. **media_adapter** (ESP32-P4)
- **Role**: Media streaming device
- **Implementation**: Uses the `streaming_only` example from
`${KVS_SDK_PATH}/esp_port/examples/streaming_only`
- **Responsibilities**:
- Video/audio capture and encoding
- WebRTC media streaming
- Power-optimized operation (sleeps when not streaming)
- Receives signaling commands via bridge from matter_camera
## System Architecture
```
┌─────────────────┐ SDIO Bridge ┌─────────────────┐
│ ESP32-C6 │◄────────────────────►│ ESP32-P4 │
│ (matter_camera) │ Communication │ (media_adapter) │
│ │ │ │
│ ┌─────────────┐ │ │ ┌─────────────┐ │
│ │ │ │ │ │ H.264 │ │
│ │ Matter │ │ │ │ Encoder │ │
│ │ │ │ │ │ │ │
│ │ Signaling │ │ │ │ Camera │ │
│ │ │ │ │ │ Interface │ │
│ └─────────────┘ │ │ └─────────────┘ │
└─────────────────┘ └─────────────────┘
▲ ▲
│ │
▼ ▼
(Signaling) Video/Audio
Hardware
```
## Quick Start
### Prerequisites
- [ESP32-P4 Function EV Board](https://docs.espressif.com/projects/esp-dev-kits/en/latest/esp32p4/esp32-p4-function-ev-board/user_guide.html)
- [ESP-IDF v5.5.1](https://github.com/espressif/esp-idf/releases/tag/v5.5.1)
- [Amazon Kinesis Video Streams WebRTC SDK repository](https://github.com/awslabs/amazon-kinesis-video-streams-webrtc-sdk-c/tree/beta-reference-esp-port)
**Important**: This requires flashing two separate firmwares on
ESP32-C6 and ESP32-P4 of `ESP32-P4 Function EV Board`
### Setup Camera example (ESP32-C6)
See the [docs](https://docs.espressif.com/projects/esp-matter/en/latest/esp32/developing.html) to setup esp-idf and esp-matter
Build and flash
```bash
cd esp-matter/examples/camera
idf.py set-target esp32c6
idf.py build
idf.py -p [PORT] flash monitor
```
**NOTE**:
- ESP32-C6 does not have an onboard UART port. You will need to use [ESP-Prog](https://docs.espressif.com/projects/esp-iot-solution/en/latest/hw-reference/ESP-Prog_guide.html) board or any other JTAG.
- Use following Pin Connections:
| ESP32-C6 (J2/Prog-C6) | ESP-Prog |
|----------|----------|
| IO0 | IO9 |
| TX0 | TXD0 |
| RX0 | RXD0 |
| EN | EN |
| GND | GND |
### Setup Media adapter (ESP32-P4)
Clone and setup the WebRTC SDK
```bash
git clone https://github.com/awslabs/amazon-kinesis-video-streams-webrtc-sdk-c.git
cd amazon-kinesis-video-streams-webrtc-sdk-c
git checkout beta-reference-esp-port
git submodule update --init --depth 1
export KVS_SDK_PATH=/path/to/amazon-kinesis-video-streams-webrtc-sdk-c
```
Build and flash
```bash
cd ${KVS_SDK_PATH}/esp_port/examples/streaming_only
idf.py set-target esp32p4
idf.py menuconfig
# Go to Component config -> ESP System Settings -> Channel for console output
# (X) USB Serial/JTAG Controller # For ESP32-P4 Function_EV_Board V1.2 OR V1.5
# (X) Default: UART0 # For ESP32-P4 Function_EV_Board V1.4
idf.py build
idf.py -p [PORT] flash monitor
```
**Note**: If the console selection is wrong, you will only see the initial
bootloader logs. Please change the console as instructed above and reflash the
app to see the complete logs.
**Note**: Currently, due to flash size limitations of ESP32-C6 onboard the
ESP32-P4 Function EV Board, the `ota_1` partition (see
[`partitions.csv`](partitions.csv)) is disabled and the size of the `ota_0`
partition is increased. This prevents the firmware from performing OTA updates.
Hence, this configuration is not recommended for production use.
### Testing
You can use any Matter based camera controller app to view the video feed. Alternatively, you can also use the [camera controller example](https://github.com/project-chip/connectedhomeip/tree/master/examples/camera-controller) from the connectedhomeip repository.
+8
View File
@@ -0,0 +1,8 @@
# Register the example's application sources and private include paths,
# plus the shared esp-matter example utility headers.
idf_component_register(SRC_DIRS "." "clusters" "common" "webrtc"
                       PRIV_INCLUDE_DIRS "." "clusters" "common" "webrtc"
                                         "${ESP_MATTER_PATH}/examples/common/utils")

# Matter headers need C++17 and the generated CHIP configuration header.
set_property(TARGET ${COMPONENT_LIB} PROPERTY CXX_STANDARD 17)
target_compile_options(${COMPONENT_LIB} PRIVATE "-DCHIP_HAVE_CONFIG_H")
+59
View File
@@ -0,0 +1,59 @@
/*
This example code is in the Public Domain (or CC0 licensed, at your option.)
Unless required by applicable law or agreed to in writing, this
software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied.
*/
#include <esp_log.h>
#include <stdlib.h>
#include <string.h>
#include <app_priv.h>
#include <esp_matter.h>
#include <button_gpio.h>
#include <device.h>
#include <led_driver.h>
using namespace chip::app::Clusters;
using namespace esp_matter;
static const char *TAG = "app_driver";
extern uint16_t camera_endpoint_id;
bool deferred_offer = false;
// Single-press (BUTTON_PRESS_DOWN) handler. Currently only logs the press;
// no camera action is wired up yet.
static void app_driver_button_toggle_cb(void *arg, void *data) {
ESP_LOGI(TAG, "Toggle button pressed");
// Fix: the original declared an unused local copy of camera_endpoint_id,
// which only produced an -Wunused-variable warning. Read the global
// directly once toggle behavior is actually implemented.
}
// Double-click handler: flips the global deferred_offer flag, which tracks
// whether the camera is in "standby" (offers deferred) mode.
static void app_driver_button_double_click_cb(void *arg, void *data) {
ESP_LOGI(TAG, "Double Click");
const bool entering_standby = !deferred_offer;
if (entering_standby) {
ESP_LOGI(TAG, "Putting Camera in Standby Mode.");
} else {
ESP_LOGI(TAG, "Moving out of Standby Mode.");
}
deferred_offer = entering_standby;
}
// Create the board's GPIO button device and attach the press-down and
// double-click callbacks.
//
// @return opaque driver handle on success, NULL if the device could not be created.
app_driver_handle_t app_driver_button_init() {
button_handle_t btn = NULL;
const button_config_t btn_cfg = {0};
const button_gpio_config_t gpio_cfg = button_driver_get_config();

if (iot_button_new_gpio_device(&btn_cfg, &gpio_cfg, &btn) != ESP_OK) {
ESP_LOGE(TAG, "Failed to create button device");
return NULL;
}

iot_button_register_cb(btn, BUTTON_PRESS_DOWN, NULL, app_driver_button_toggle_cb, NULL);
iot_button_register_cb(btn, BUTTON_DOUBLE_CLICK, NULL, app_driver_button_double_click_cb, NULL);

return (app_driver_handle_t)btn;
}
+288
View File
@@ -0,0 +1,288 @@
/*
This example code is in the Public Domain (or CC0 licensed, at your option.)
Unless required by applicable law or agreed to in writing, this
software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied.
*/
#include <esp_err.h>
#include <esp_log.h>
#include <nvs_flash.h>
#include <esp_matter.h>
#include <esp_matter_console.h>
#include <esp_matter_ota.h>
#include <app_priv.h>
#include <app_reset.h>
#include <common_macros.h>
#include <esp_wifi.h>
#include <app/server/CommissioningWindowManager.h>
#include <app/server/Server.h>
#if CONFIG_ENABLE_SNTP_TIME_SYNC
#include <app/clusters/time-synchronization-server/DefaultTimeSyncDelegate.h>
#endif
#include "camera-app.h"
#include "camera-device.h"
#include "esp_webrtc_time.h"
#include "esp_work_queue.h"
#include "signaling_serializer.h"
#include "webrtc_bridge.h"
extern "C" void network_coprocessor_init();
static const char *TAG = "app_main";
uint16_t camera_endpoint_id = 0;
using namespace esp_matter;
using namespace esp_matter::attribute;
using namespace esp_matter::endpoint;
using namespace chip::app::Clusters;
using namespace Camera;
CameraDevice gCameraDevice;
constexpr auto k_timeout_seconds = 300;
// Matter platform event handler, invoked on the CHIP event loop.
// Mostly logs lifecycle events; two cases have side effects:
//  - kInterfaceIpAddressChanged starts a non-blocking SNTP time sync
//  - kFabricRemoved re-opens a basic commissioning window once the last
//    fabric is gone (DNS-SD advertisement only; Wi-Fi credentials are kept).
static void app_event_cb(const ChipDeviceEvent *event, intptr_t arg) {
switch (event->Type) {
case chip::DeviceLayer::DeviceEventType::kInterfaceIpAddressChanged:
ESP_LOGI(TAG, "Interface IP Address changed");
// Kick off wall-clock sync as soon as we have IP connectivity.
esp_webrtc_time_sntp_time_sync_no_wait();
break;
case chip::DeviceLayer::DeviceEventType::kCommissioningComplete:
ESP_LOGI(TAG, "Commissioning complete");
break;
case chip::DeviceLayer::DeviceEventType::kFailSafeTimerExpired:
ESP_LOGI(TAG, "Commissioning failed, fail safe timer expired");
break;
case chip::DeviceLayer::DeviceEventType::kCommissioningSessionStarted:
ESP_LOGI(TAG, "Commissioning session started");
break;
case chip::DeviceLayer::DeviceEventType::kCommissioningSessionStopped:
ESP_LOGI(TAG, "Commissioning session stopped");
break;
case chip::DeviceLayer::DeviceEventType::kCommissioningWindowOpened:
ESP_LOGI(TAG, "Commissioning window opened");
break;
case chip::DeviceLayer::DeviceEventType::kCommissioningWindowClosed:
ESP_LOGI(TAG, "Commissioning window closed");
break;
case chip::DeviceLayer::DeviceEventType::kFabricRemoved: {
ESP_LOGI(TAG, "Fabric removed successfully");
// If no fabrics remain, reopen a commissioning window so the device
// can be paired again without a factory reset.
if (chip::Server::GetInstance().GetFabricTable().FabricCount() == 0) {
chip::CommissioningWindowManager &commissionMgr =
chip::Server::GetInstance().GetCommissioningWindowManager();
constexpr auto kTimeoutSeconds =
chip::System::Clock::Seconds16(k_timeout_seconds);
if (!commissionMgr.IsCommissioningWindowOpen()) {
/* After removing last fabric, this example does not remove the Wi-Fi
* credentials and still has IP connectivity so, only advertising on
* DNS-SD.
*/
CHIP_ERROR err = commissionMgr.OpenBasicCommissioningWindow(
kTimeoutSeconds,
chip::CommissioningWindowAdvertisement::kDnssdOnly);
if (err != CHIP_NO_ERROR) {
ESP_LOGE(
TAG,
"Failed to open commissioning window, err:%" CHIP_ERROR_FORMAT,
err.Format());
}
}
}
break;
}
case chip::DeviceLayer::DeviceEventType::kFabricWillBeRemoved:
ESP_LOGI(TAG, "Fabric will be removed");
break;
case chip::DeviceLayer::DeviceEventType::kFabricUpdated:
ESP_LOGI(TAG, "Fabric is updated");
break;
case chip::DeviceLayer::DeviceEventType::kFabricCommitted:
ESP_LOGI(TAG, "Fabric is committed");
break;
case chip::DeviceLayer::DeviceEventType::kBLEDeinitialized:
ESP_LOGI(TAG, "BLE deinitialized and memory reclaimed");
break;
default:
break;
}
}
// This callback is invoked when clients interact with the Identify Cluster.
// In the callback implementation, an endpoint can identify itself. (e.g., by
// flashing an LED or light).
// Identify-cluster callback: this example has no LED/indicator wired up, so
// the request is only logged. Always reports success.
static esp_err_t app_identification_cb(identification::callback_type_t type,
uint16_t endpoint_id, uint8_t effect_id,
uint8_t effect_variant,
void *priv_data) {
ESP_LOGI(TAG, "Identification callback: type: %u, effect: %u, variant: %u",
type, effect_id, effect_variant);
return ESP_OK;
}
// This callback is called for every attribute update. The callback
// implementation shall handle the desired attributes and return an appropriate
// error code. If the attribute is not of your interest, please do not return an
// error code and strictly return ESP_OK.
// Attribute update callback, invoked for every attribute write. No attribute
// is driver-backed in this example yet, so all updates are accepted as-is.
// Returning ESP_OK for uninteresting attributes is required so the Matter
// data model proceeds with the update.
static esp_err_t
app_attribute_update_cb(attribute::callback_type_t type, uint16_t endpoint_id,
uint32_t cluster_id, uint32_t attribute_id,
esp_matter_attr_val_t *val, void *priv_data) {
esp_err_t err = ESP_OK;
if (type == PRE_UPDATE) {
/* Driver update hook. Fix: the original declared an unused
* app_driver_handle_t local from priv_data here, which only produced an
* -Wunused-variable warning; cast priv_data when a driver is attached. */
}
return err;
}
#ifdef CONFIG_SLAVE_LWIP_ENABLED
// Create a Wi-Fi station netif based on the default STA config, optionally
// with the automatic DHCP-client flag removed.
//
// @param dhcp_at_slave non-zero: keep the default DHCP client behavior;
//                      zero: strip ESP_NETIF_DHCP_CLIENT and stop dhcpc so
//                      the slave side owns DHCP.
// NOTE(review): aborts via assert/ESP_ERROR_CHECK on any failure.
static void create_slave_sta_netif(uint8_t dhcp_at_slave) {
/* Create "almost" default station, but with un-flagged DHCP client */
esp_netif_inherent_config_t netif_cfg;
memcpy(&netif_cfg, ESP_NETIF_BASE_DEFAULT_WIFI_STA, sizeof(netif_cfg));
if (!dhcp_at_slave) {
netif_cfg.flags =
(esp_netif_flags_t)(netif_cfg.flags & ~ESP_NETIF_DHCP_CLIENT);
}
esp_netif_config_t cfg_sta = {
.base = &netif_cfg,
.stack = ESP_NETIF_NETSTACK_DEFAULT_WIFI_STA,
};
esp_netif_t *netif_sta = esp_netif_new(&cfg_sta);
assert(netif_sta);
ESP_ERROR_CHECK(esp_netif_attach_wifi_station(netif_sta));
ESP_ERROR_CHECK(esp_wifi_set_default_wifi_sta_handlers());
if (!dhcp_at_slave)
ESP_ERROR_CHECK(esp_netif_dhcpc_stop(netif_sta));
// slave_sta_netif = netif_sta;
}
#endif
// Print a heap usage table (internal RAM vs SPIRAM columns) to stdout:
// total size, current free, largest free block, and minimum-ever free size.
void sdp_mem_dump() {
printf("\tDescription\tInternal\tSPIRAM\n");
printf("Total Memory\t\t%d\t\t%d\n",
heap_caps_get_total_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
heap_caps_get_total_size(MALLOC_CAP_SPIRAM));
/* Fix: query internal free space directly with
* MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL (as every other row does) instead
* of approximating it by subtracting SPIRAM free from overall 8-bit free. */
printf("Current Free Memory\t%d\t\t%d\n",
heap_caps_get_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
heap_caps_get_free_size(MALLOC_CAP_SPIRAM));
printf(
"Largest Free Block\t%d\t\t%d\n",
heap_caps_get_largest_free_block(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
heap_caps_get_largest_free_block(MALLOC_CAP_SPIRAM));
printf("Min. Ever Free Size\t%d\t\t%d\n",
heap_caps_get_minimum_free_size(MALLOC_CAP_8BIT | MALLOC_CAP_INTERNAL),
heap_caps_get_minimum_free_size(MALLOC_CAP_SPIRAM));
}
// Application entry point: brings up NVS/netif/event loop, the WebRTC bridge
// to the media co-processor, the button driver, then creates the Matter node
// with a camera endpoint and starts the Matter stack and camera app logic.
extern "C" void app_main() {
esp_err_t err = ESP_OK;
/* Initialize the ESP NVS layer */
// NOTE(review): nvs_flash_init() return value is ignored here — if NVS needs
// erase/re-init, later Matter start may fail; consider checking it.
nvs_flash_init();
ESP_ERROR_CHECK(esp_netif_init());
ESP_ERROR_CHECK(esp_event_loop_create_default());
#ifdef CONFIG_SLAVE_LWIP_ENABLED
create_slave_sta_netif(true);
#endif
// Bring up the signaling serializer, the link to the network co-processor,
// the work queue, and the WebRTC bridge before Matter starts.
signaling_serializer_init();
network_coprocessor_init();
esp_work_queue_init();
esp_work_queue_start();
webrtc_bridge_start();
/* Initialize driver */
app_driver_handle_t button_handle = app_driver_button_init();
app_reset_button_register(button_handle);
/* Create a Matter node and add the mandatory Root Node device type on
* endpoint 0 */
node::config_t node_config;
// node handle can be used to add/modify other endpoints.
node_t *node = node::create(&node_config, app_attribute_update_cb,
app_identification_cb);
ABORT_APP_ON_FAILURE(node != nullptr,
ESP_LOGE(TAG, "Failed to create Matter node"));
camera::config_t cam_config;
#if CONFIG_ENABLE_SNTP_TIME_SYNC
static chip::app::Clusters::TimeSynchronization::DefaultTimeSyncDelegate
time_sync_delegate;
// cam_config.time_synchronization.delegate = &time_sync_delegate;
#endif
// endpoint handles can be used to add/modify clusters.
// Enable the Audio, Video and Snapshot features of AV stream management.
cam_config.camera_av_stream_management.feature_flags = cluster::camera_av_stream_management::feature::audio::get_id() | cluster::camera_av_stream_management::feature::video::get_id() | cluster::camera_av_stream_management::feature::snapshot::get_id();
endpoint_t *endpoint =
camera::create(node, &cam_config, ENDPOINT_FLAG_NONE, NULL);
ABORT_APP_ON_FAILURE(endpoint != nullptr,
ESP_LOGE(TAG, "Failed to create camera endpoint"));
#if CONFIG_ENABLE_SNTP_TIME_SYNC
cluster::time_synchronization::feature::time_zone::config_t tz_cfg;
cluster_t *time_sync_cluster =
cluster::get(endpoint, TimeSynchronization::Id);
cluster::time_synchronization::feature::time_zone::add(time_sync_cluster,
&tz_cfg);
#endif
camera_endpoint_id = endpoint::get_id(endpoint);
ESP_LOGI(TAG, "Camera created with endpoint_id %d", camera_endpoint_id);
/* Matter start */
err = esp_matter::start(app_event_cb);
ABORT_APP_ON_FAILURE(err == ESP_OK,
ESP_LOGE(TAG, "Failed to start Matter, err:%d", err));
ESP_LOGW("Camera", "ESP Matter Camera App: ApplicationInit()");
// Initialize the camera device under the CHIP stack lock; the lock is held
// until app_main returns.
lock::ScopedChipStackLock lock(portMAX_DELAY);
gCameraDevice.Init();
CameraAppInit(&gCameraDevice);
#if CONFIG_ENABLE_CHIP_SHELL
esp_matter::console::diagnostics_register_commands();
esp_matter::console::wifi_register_commands();
esp_matter::console::factoryreset_register_commands();
esp_matter::console::init();
#endif
}
+45
View File
@@ -0,0 +1,45 @@
/*
This example code is in the Public Domain (or CC0 licensed, at your option.)
Unless required by applicable law or agreed to in writing, this
software is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
CONDITIONS OF ANY KIND, either express or implied.
*/
#pragma once
#include <esp_err.h>
#include <esp_matter.h>
#if CHIP_DEVICE_CONFIG_ENABLE_THREAD
#include "esp_openthread_types.h"
#endif
/** Standard max values (used for remapping attributes) */
// NOTE(review): the brightness/hue/saturation constants below appear to be
// carried over from the light example and are not obviously used by the
// camera app — confirm before relying on (or removing) them.
#define STANDARD_BRIGHTNESS 100
#define STANDARD_HUE 360
#define STANDARD_SATURATION 100
#define STANDARD_TEMPERATURE_FACTOR 1000000
/** Matter max values (used for remapping attributes) */
#define MATTER_BRIGHTNESS 254
#define MATTER_HUE 254
#define MATTER_SATURATION 254
#define MATTER_TEMPERATURE_FACTOR 1000000
/** Default attribute values used during initialization */
#define DEFAULT_POWER true
#define DEFAULT_BRIGHTNESS 64
#define DEFAULT_HUE 128
#define DEFAULT_SATURATION 254
// Opaque handle returned by the driver init functions (wraps button_handle_t).
typedef void *app_driver_handle_t;
/** Initialize the button driver
 *
 * This initializes the button driver associated with the selected board.
 *
 * @return Handle on success.
 * @return NULL in case of failure.
 */
app_driver_handle_t app_driver_button_init();
+608
View File
@@ -0,0 +1,608 @@
#include "camera-device.h"
#include <filesystem>
#include <fstream>
#include <iostream>
#include <lib/support/logging/CHIPLogging.h>
#include <limits.h> // For PATH_MAX
#include <sys/ioctl.h>
using namespace chip::app::Clusters;
using namespace chip::app::Clusters::CameraAvStreamManagement;
using namespace chip::app::Clusters::WebRTCTransportProvider;
using namespace Camera;
// Wire this device object into its managers: it serves as the camera HAL for
// the AV stream manager and as the device backend for the WebRTC provider.
CameraDevice::CameraDevice() {
// Set the CameraHALInterface in CameraAVStreamManager
mCameraAVStreamManager.SetCameraDeviceHAL(this);
// Set the CameraDevice interface in WebRTCManager
mWebRTCProviderManager.SetCameraDevice(this);
}
CameraDevice::~CameraDevice() {}
// Bring up the (stub) camera hardware, pre-populate the stream tables, then
// initialize the WebRTC provider manager.
void CameraDevice::Init() {
InitializeCameraDevice();
InitializeStreams();
mWebRTCProviderManager.Init();
}
// Hardware bring-up placeholder; no real camera hardware on this chip.
CameraError CameraDevice::InitializeCameraDevice() {
return CameraError::SUCCESS;
}
// Populate the video, audio and snapshot stream tables.
CameraError CameraDevice::InitializeStreams() {
InitializeVideoStreams();
InitializeAudioStreams();
InitializeSnapshotStreams();
return CameraError::SUCCESS;
}
// Find the closest allocated snapshot stream with resolution >= requested, or
// closest possible
bool CameraDevice::MatchClosestSnapshotParams(
const VideoResolutionStruct &requested,
VideoResolutionStruct &matchedResolution, ImageCodecEnum &matchedCodec) {
int64_t requestedPixels =
static_cast<int64_t>(requested.width) * requested.height;
int64_t bestDiff = std::numeric_limits<int64_t>::max();
int64_t bestGEQDiff = std::numeric_limits<int64_t>::max();
const SnapshotStream *bestStream = nullptr;
const SnapshotStream *bestGEQStream = nullptr;
for (const auto &stream : mSnapshotStreams) {
int64_t streamPixels =
static_cast<int64_t>(stream.snapshotStreamParams.minResolution.width) *
stream.snapshotStreamParams.minResolution.height;
int64_t diff = streamPixels - requestedPixels;
int64_t absDiff = std::abs(diff);
// Candidate 1: First stream with resolution >= requested
if (diff >= 0 && diff < bestGEQDiff) {
bestGEQDiff = diff;
bestGEQStream = &stream;
}
// Candidate 2: Closest stream (absolute difference)
if (absDiff < bestDiff) {
bestDiff = absDiff;
bestStream = &stream;
}
}
const SnapshotStream *chosen = bestGEQStream ? bestGEQStream : bestStream;
if (chosen) {
matchedResolution = chosen->snapshotStreamParams.minResolution;
matchedCodec = chosen->snapshotStreamParams.imageCodec;
return true;
}
return false;
}
// "Capture" a snapshot: resolves the target stream's resolution/codec (either
// from the given stream ID or by best-match against the requested resolution)
// and returns a fixed placeholder JPEG — there is no real sensor on this chip.
CameraError CameraDevice::CaptureSnapshot(
const chip::app::DataModel::Nullable<uint16_t> streamID,
const VideoResolutionStruct &resolution, ImageSnapshot &outImageSnapshot) {
VideoResolutionStruct matchedRes;
ImageCodecEnum matchedCodec;
if (streamID.IsNull()) {
// No stream specified: pick the closest allocated snapshot stream.
if (!MatchClosestSnapshotParams(resolution, matchedRes, matchedCodec)) {
ChipLogError(
Camera,
"No matching snapshot stream found for requested resolution %ux%u",
resolution.width, resolution.height);
return CameraError::ERROR_CAPTURE_SNAPSHOT_FAILED;
}
} else {
// Explicit stream ID: look it up and use its parameters verbatim.
uint16_t streamId = streamID.Value();
auto it = std::find_if(mSnapshotStreams.begin(), mSnapshotStreams.end(),
[streamId](const SnapshotStream &s) {
return s.snapshotStreamParams.snapshotStreamID ==
streamId;
});
if (it == mSnapshotStreams.end()) {
ChipLogError(Camera, "Snapshot stream not found for stream ID %u",
streamId);
return CameraError::ERROR_CAPTURE_SNAPSHOT_FAILED;
}
matchedRes = it->snapshotStreamParams.minResolution;
matchedCodec = it->snapshotStreamParams.imageCodec;
}
// Create a meaningful dummy JPEG image for ESP32
// This is a 32x32 pixel JPEG with "ESP32" text pattern (428 bytes)
static const uint8_t dummy_jpeg[] = {
0xFF, 0xD8, 0xFF, 0xE0, 0x00, 0x10, 0x4A, 0x46, 0x49, 0x46, 0x00, 0x01,
0x01, 0x01, 0x00, 0x48, 0x00, 0x48, 0x00, 0x00, 0xFF, 0xDB, 0x00, 0x43,
0x00, 0x08, 0x06, 0x06, 0x07, 0x06, 0x05, 0x08, 0x07, 0x07, 0x07, 0x09,
0x09, 0x08, 0x0A, 0x0C, 0x14, 0x0D, 0x0C, 0x0B, 0x0B, 0x0C, 0x19, 0x12,
0x13, 0x0F, 0x14, 0x1D, 0x1A, 0x1F, 0x1E, 0x1D, 0x1A, 0x1C, 0x1C, 0x20,
0x24, 0x2E, 0x27, 0x20, 0x22, 0x2C, 0x23, 0x1C, 0x1C, 0x28, 0x37, 0x29,
0x2C, 0x30, 0x31, 0x34, 0x34, 0x34, 0x1F, 0x27, 0x39, 0x3D, 0x38, 0x32,
0x3C, 0x2E, 0x33, 0x34, 0x32, 0xFF, 0xC0, 0x00, 0x11, 0x08, 0x00, 0x20,
0x00, 0x20, 0x01, 0x11, 0x00, 0x02, 0x11, 0x01, 0x03, 0x11, 0x01, 0xFF,
0xC4, 0x00, 0x14, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0xFF, 0xC4,
0x00, 0x14, 0x10, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xDA, 0x00, 0x0C,
0x03, 0x01, 0x00, 0x02, 0x11, 0x03, 0x11, 0x00, 0x3F, 0x00, 0x80, 0xFF,
0xD9};
// Copy the dummy JPEG data to the output
outImageSnapshot.data.assign(dummy_jpeg, dummy_jpeg + sizeof(dummy_jpeg));
outImageSnapshot.imageRes = matchedRes;
outImageSnapshot.imageCodec = matchedCodec;
return CameraError::SUCCESS;
}
// Allocate snapshot stream
// Allocate a snapshot stream: add it to the stream table via
// AddSnapshotStream, then mark the new entry as allocated.
// On success outStreamID carries the new stream's ID; on failure (table full
// or the freshly added ID cannot be found) returns ERROR_RESOURCE_EXHAUSTED.
CameraError CameraDevice::AllocateSnapshotStream(
const CameraAVStreamMgmtDelegate::SnapshotStreamAllocateArgs &args,
uint16_t &outStreamID) {
if (!AddSnapshotStream(args, outStreamID)) {
return CameraError::ERROR_RESOURCE_EXHAUSTED;
}
const uint16_t newStreamId = outStreamID;
auto match =
std::find_if(mSnapshotStreams.begin(), mSnapshotStreams.end(),
[newStreamId](const SnapshotStream &s) {
return s.snapshotStreamParams.snapshotStreamID ==
newStreamId;
});
if (match == mSnapshotStreams.end()) {
ChipLogError(Camera, "Snapshot stream with ID %u not found", outStreamID);
return CameraError::ERROR_RESOURCE_EXHAUSTED;
}
match->isAllocated = true;
ChipLogProgress(Camera, "Allocated snapshot stream with ID: %u",
outStreamID);
return CameraError::SUCCESS;
}
// ---- Capability getters -------------------------------------------------
// These report fixed capabilities from the k* constants; none of them touch
// hardware. Values returned by reference are function-local statics.
uint8_t CameraDevice::GetMaxConcurrentEncoders() {
return kMaxConcurrentEncoders;
}
uint32_t CameraDevice::GetMaxEncodedPixelRate() { return kMaxEncodedPixelRate; }
VideoSensorParamsStruct &CameraDevice::GetVideoSensorParams() {
static VideoSensorParamsStruct videoSensorParams = {
kVideoSensorWidthPixels, kVideoSensorHeightPixels, kMaxVideoFrameRate,
chip::Optional<uint16_t>(30)}; // Typical numbers for Pi camera.
return videoSensorParams;
}
bool CameraDevice::GetCameraSupportsHDR() { return false; }
bool CameraDevice::GetCameraSupportsNightVision() { return false; }
bool CameraDevice::GetNightVisionUsesInfrared() { return false; }
bool CameraDevice::GetCameraSupportsWatermark() { return true; }
bool CameraDevice::GetCameraSupportsOSD() { return true; }
bool CameraDevice::GetCameraSupportsSoftPrivacy() { return false; }
bool CameraDevice::GetCameraSupportsImageControl() { return false; }
VideoResolutionStruct &CameraDevice::GetMinViewport() {
static VideoResolutionStruct minViewport = {kMinResolutionWidth,
kMinResolutionHeight};
return minViewport;
}
// Single H.264 trade-off point at the minimum resolution.
std::vector<RateDistortionTradeOffStruct> &
CameraDevice::GetRateDistortionTradeOffPoints() {
static std::vector<RateDistortionTradeOffStruct> rateDistTradeOffs = {
{VideoCodecEnum::kH264,
{kMinResolutionWidth, kMinResolutionHeight},
10000 /* bitrate */}};
return rateDistTradeOffs;
}
uint32_t CameraDevice::GetMaxContentBufferSize() {
return kMaxContentBufferSizeBytes;
}
// Microphone and speaker report identical codec/rate/bit-depth sets; the
// spans reference function-local static arrays.
AudioCapabilitiesStruct &CameraDevice::GetMicrophoneCapabilities() {
static std::array<AudioCodecEnum, 2> audioCodecs = {AudioCodecEnum::kOpus,
AudioCodecEnum::kAacLc};
static std::array<uint32_t, 2> sampleRates = {48000,
32000}; // Sample rates in Hz
static std::array<uint8_t, 2> bitDepths = {24, 32};
static AudioCapabilitiesStruct audioCapabilities = {
kMicrophoneMaxChannelCount, chip::Span<AudioCodecEnum>(audioCodecs),
chip::Span<uint32_t>(sampleRates), chip::Span<uint8_t>(bitDepths)};
return audioCapabilities;
}
AudioCapabilitiesStruct &CameraDevice::GetSpeakerCapabilities() {
static std::array<AudioCodecEnum, 2> audioCodecs = {AudioCodecEnum::kOpus,
AudioCodecEnum::kAacLc};
static std::array<uint32_t, 2> sampleRates = {48000,
32000}; // Sample rates in Hz
static std::array<uint8_t, 2> bitDepths = {24, 32};
static AudioCapabilitiesStruct speakerCapabilities = {
kSpeakerMaxChannelCount, chip::Span<AudioCodecEnum>(audioCodecs),
chip::Span<uint32_t>(sampleRates), chip::Span<uint8_t>(bitDepths)};
return speakerCapabilities;
}
// Single JPEG snapshot capability at the minimum resolution.
std::vector<SnapshotCapabilitiesStruct> &
CameraDevice::GetSnapshotCapabilities() {
static std::vector<SnapshotCapabilitiesStruct> snapshotCapabilities = {
{{kMinResolutionWidth, kMinResolutionHeight},
kSnapshotStreamFrameRate,
ImageCodecEnum::kJpeg,
false,
chip::MakeOptional(static_cast<bool>(false))}};
return snapshotCapabilities;
}
CameraError CameraDevice::SetNightVision(TriStateAutoEnum nightVision) {
mNightVision = nightVision;
return CameraError::SUCCESS;
}
uint32_t CameraDevice::GetMaxNetworkBandwidth() {
return kMaxNetworkBandwidthbps;
}
uint16_t CameraDevice::GetCurrentFrameRate() { return mCurrentVideoFrameRate; }
CameraError CameraDevice::SetHDRMode(bool hdrMode) {
mHDREnabled = hdrMode;
return CameraError::SUCCESS;
}
CameraError CameraDevice::SetHardPrivacyMode(bool hardPrivacyMode) {
ChipLogProgress(Camera, "SetHardPrivacyMode: Setting hard privacy mode to %s",
hardPrivacyMode ? "true" : "false");
mHardPrivacyModeOn = hardPrivacyMode;
return CameraError::SUCCESS;
}
CameraError CameraDevice::SetStreamUsagePriorities(
std::vector<StreamUsageEnum> streamUsagePriorities) {
mStreamUsagePriorities = streamUsagePriorities;
return CameraError::SUCCESS;
}
std::vector<StreamUsageEnum> &CameraDevice::GetSupportedStreamUsages() {
static std::vector<StreamUsageEnum> supportedStreamUsage = {
StreamUsageEnum::kLiveView, StreamUsageEnum::kRecording};
return supportedStreamUsage;
}
// Set the camera's default (device-wide) viewport.
CameraError CameraDevice::SetViewport(
    const chip::app::Clusters::Globals::Structs::ViewportStruct::Type
        &viewPort) {
  mViewport = viewPort;
  return CameraError::SUCCESS;
}
// Applies a per-stream viewport to an allocated video stream. Per the HAL
// contract, the caller has already validated the viewport's size and aspect
// ratio, so this simply records it on the stream.
CameraError CameraDevice::SetViewport(
    VideoStream &stream,
    const chip::app::Clusters::Globals::Structs::ViewportStruct::Type
        &viewport) {
  ChipLogDetail(Camera, "Setting per stream viewport for stream %d.",
                stream.videoStreamParams.videoStreamID);
  ChipLogDetail(Camera, "New viewport. x1=%d, x2=%d, y1=%d, y2=%d.",
                viewport.x1, viewport.x2, viewport.y1, viewport.y2);
  stream.viewport = viewport;
  return CameraError::SUCCESS;
}
// Enable/disable the soft privacy mode that suppresses recording.
CameraError CameraDevice::SetSoftRecordingPrivacyModeEnabled(
    bool softRecordingPrivacyMode) {
  mSoftRecordingPrivacyModeEnabled = softRecordingPrivacyMode;
  return CameraError::SUCCESS;
}
// Enable/disable the soft privacy mode that suppresses live streaming, and
// propagate the change to the WebRTC provider so active sessions can react.
CameraError CameraDevice::SetSoftLivestreamPrivacyModeEnabled(
    bool softLivestreamPrivacyMode) {
  mSoftLivestreamPrivacyModeEnabled = softLivestreamPrivacyMode;
  mWebRTCProviderManager.LiveStreamPrivacyModeChanged(
      softLivestreamPrivacyMode);
  return CameraError::SUCCESS;
}
// Mute or unmute the speaker.
CameraError CameraDevice::SetSpeakerMuted(bool muteSpeaker) {
  mSpeakerMuted = muteSpeaker;
  return CameraError::SUCCESS;
}
// Store the requested speaker volume level.
CameraError CameraDevice::SetSpeakerVolume(uint8_t speakerVol) {
  mSpeakerVol = speakerVol;
  return CameraError::SUCCESS;
}
// Mute or unmute the microphone.
CameraError CameraDevice::SetMicrophoneMuted(bool muteMicrophone) {
  mMicrophoneMuted = muteMicrophone;
  return CameraError::SUCCESS;
}
// Store the requested microphone volume level.
CameraError CameraDevice::SetMicrophoneVolume(uint8_t microphoneVol) {
  mMicrophoneVol = microphoneVol;
  return CameraError::SUCCESS;
}
// Store the image rotation attribute (degrees).
CameraError CameraDevice::SetImageRotation(uint16_t imageRotation) {
  mImageRotation = imageRotation;
  return CameraError::SUCCESS;
}
// Store the horizontal image-flip attribute.
CameraError CameraDevice::SetImageFlipHorizontal(bool imageFlipHorizontal) {
  mImageFlipHorizontal = imageFlipHorizontal;
  return CameraError::SUCCESS;
}
// Store the vertical image-flip attribute.
CameraError CameraDevice::SetImageFlipVertical(bool imageFlipVertical) {
  mImageFlipVertical = imageFlipVertical;
  return CameraError::SUCCESS;
}
// Enable/disable recording video to local storage.
CameraError
CameraDevice::SetLocalVideoRecordingEnabled(bool localVideoRecordingEnabled) {
  mLocalVideoRecordingEnabled = localVideoRecordingEnabled;
  return CameraError::SUCCESS;
}
// Enable/disable saving snapshots to local storage.
CameraError CameraDevice::SetLocalSnapshotRecordingEnabled(
    bool localSnapshotRecordingEnabled) {
  mLocalSnapshotRecordingEnabled = localSnapshotRecordingEnabled;
  return CameraError::SUCCESS;
}
// Enable/disable the device status light.
CameraError CameraDevice::SetStatusLightEnabled(bool statusLightEnabled) {
  mStatusLightEnabled = statusLightEnabled;
  return CameraError::SUCCESS;
}
// Populates mVideoStreams with the fixed set of H.264 profiles this camera
// offers. Each entry starts unallocated (isAllocated == false), carries the
// device's default viewport, and has no pipeline context (nullptr).
void CameraDevice::InitializeVideoStreams() {
  // Create a video stream with a max resolution of 720p and max frame rate of
  // 60 fps
  VideoStream videoStream1 = {
      {
          1 /* Id */,
          StreamUsageEnum::kLiveView /* StreamUsage */,
          VideoCodecEnum::kH264,
          kMinVideoFrameRate /* MinFrameRate */,
          k60fpsVideoFrameRate /* MaxFrameRate */,
          {kMinResolutionWidth, kMinResolutionHeight} /* MinResolution */,
          {k720pResolutionWidth, k720pResolutionHeight} /* MaxResolution */,
          kMinBitRateBps /* MinBitRate */,
          kMaxBitRateBps /* MaxBitRate */,
          kKeyFrameIntervalMsec /* KeyFrameInterval */,
          chip::MakeOptional(static_cast<bool>(false)) /* WMark */,
          chip::MakeOptional(static_cast<bool>(false)) /* OSD */,
          0 /* RefCount */
      },
      false /* isAllocated */,
      {mViewport.x1, mViewport.y1, mViewport.x2, mViewport.y2} /* viewport */,
      nullptr /* pipeline context */};
  mVideoStreams.push_back(videoStream1);
  // Create a video stream with a min framerate of 60 fps and min resolution
  // of 720p
  VideoStream videoStream2 = {
      {
          2 /* Id */,
          StreamUsageEnum::kLiveView /* StreamUsage */,
          VideoCodecEnum::kH264,
          k60fpsVideoFrameRate /* MinFrameRate */,
          kMaxVideoFrameRate /* MaxFrameRate */,
          {k720pResolutionWidth, k720pResolutionHeight} /* MinResolution */,
          {kMaxResolutionWidth, kMaxResolutionHeight} /* MaxResolution */,
          kMinBitRateBps /* MinBitRate */,
          kMaxBitRateBps /* MaxBitRate */,
          kKeyFrameIntervalMsec /* KeyFrameInterval */,
          chip::MakeOptional(static_cast<bool>(false)) /* WMark */,
          chip::MakeOptional(static_cast<bool>(false)) /* OSD */,
          0 /* RefCount */
      },
      false /* isAllocated */,
      {mViewport.x1, mViewport.y1, mViewport.x2, mViewport.y2} /* viewport */,
      nullptr /* pipeline context */};
  mVideoStreams.push_back(videoStream2);
  // Create a video stream for the full range(fps, resolution, bitrate)
  // supported by the camera.
  VideoStream videoStream3 = {
      {
          3 /* Id */,
          StreamUsageEnum::kLiveView /* StreamUsage */,
          VideoCodecEnum::kH264,
          kMinVideoFrameRate /* MinFrameRate */,
          kMaxVideoFrameRate /* MaxFrameRate */,
          {kMinResolutionWidth, kMinResolutionHeight} /* MinResolution */,
          {kMaxResolutionWidth, kMaxResolutionHeight} /* MaxResolution */,
          kMinBitRateBps /* MinBitRate */,
          kMaxBitRateBps /* MaxBitRate */,
          kKeyFrameIntervalMsec /* KeyFrameInterval */,
          chip::MakeOptional(static_cast<bool>(false)) /* WMark */,
          chip::MakeOptional(static_cast<bool>(false)) /* OSD */,
          0 /* RefCount */
      },
      false /* isAllocated */,
      {mViewport.x1, mViewport.y1, mViewport.x2, mViewport.y2} /* viewport */,
      nullptr /* pipeline context */};
  mVideoStreams.push_back(videoStream3);
}
// Populates mAudioStreams with the fixed set of Opus profiles this camera
// offers (mono, stereo, and max-channel-count). Each entry starts
// unallocated with no pipeline context (nullptr).
void CameraDevice::InitializeAudioStreams() {
  // Mono stream
  AudioStream monoStream = {
      {
          1 /* Id */, StreamUsageEnum::kLiveView, AudioCodecEnum::kOpus,
          1 /* ChannelCount: Mono */, 48000 /* SampleRate */,
          20000 /* BitRate */, 24 /* BitDepth */, 0 /* RefCount */
      },
      false /* isAllocated */,
      nullptr /* pipeline context */};
  mAudioStreams.push_back(monoStream);
  // Stereo stream
  AudioStream stereoStream = {
      {
          2 /* Id */, StreamUsageEnum::kLiveView, AudioCodecEnum::kOpus,
          2 /* ChannelCount: Stereo */, 48000 /* SampleRate */,
          32000 /* BitRate */, 24 /* BitDepth */, 0 /* RefCount */
      },
      false /* isAllocated */,
      nullptr /* pipeline context */};
  mAudioStreams.push_back(stereoStream);
  // Max channel count stream (from spec constant)
  AudioStream maxChannelStream = {
      {
          3 /* Id */, StreamUsageEnum::kLiveView, AudioCodecEnum::kOpus,
          kMicrophoneMaxChannelCount /* Max from Spec */,
          48000 /* SampleRate */, 64000 /* BitRate */, 24 /* BitDepth */,
          0 /* RefCount */
      },
      false /* isAllocated */,
      nullptr /* pipeline context */};
  mAudioStreams.push_back(maxChannelStream);
}
// Seed the HAL with one JPEG snapshot stream spanning the camera's full
// resolution range at the default snapshot frame rate and quality 90.
void CameraDevice::InitializeSnapshotStreams() {
  uint16_t allocatedId = kInvalidStreamID; // Let AddSnapshotStream pick the ID
  AddSnapshotStream(
      {ImageCodecEnum::kJpeg, kSnapshotStreamFrameRate /* FrameRate */,
       {kMinResolutionWidth, kMinResolutionHeight} /* MinResolution */,
       {kMaxResolutionWidth, kMaxResolutionHeight} /* MaxResolution */,
       90 /* Quality */},
      allocatedId);
}
// Adds a snapshot stream, built from the given allocate arguments, to the
// HAL's list of available snapshot streams.
//
// If outStreamID is kInvalidStreamID, a fresh unique ID is generated:
// starting just past the highest ID currently in use, incrementing and
// wrapping at 65535, and never producing the reserved kInvalidStreamID
// sentinel (fix: the previous search could hand out the sentinel itself).
// Otherwise outStreamID is used as-is — this path is taken when the stream
// list is rebuilt from the persisted allocations loaded at Init() — and the
// call fails if that ID already exists.
//
// Returns true on success; false when no ID slot is free or the requested
// ID is already present.
bool CameraDevice::AddSnapshotStream(
    const CameraAVStreamMgmtDelegate::SnapshotStreamAllocateArgs
        &snapshotStreamAllocateArgs,
    uint16_t &outStreamID) {
  constexpr uint16_t kMaxSnapshotStreams = std::numeric_limits<uint16_t>::max();
  if (mSnapshotStreams.size() >= kMaxSnapshotStreams) {
    ChipLogError(Camera, "Maximum number of snapshot streams reached. Cannot "
                         "allocate a new one");
    return false;
  }
  uint16_t streamId = 0;
  if (outStreamID == kInvalidStreamID) {
    // Start the search from the highest existing stream ID.
    for (const auto &s : mSnapshotStreams) {
      if (s.snapshotStreamParams.snapshotStreamID > streamId) {
        streamId = s.snapshotStreamParams.snapshotStreamID;
      }
    }
    // Find a unique stream id, incrementing and wrapping at 65535. The
    // reserved kInvalidStreamID sentinel is explicitly skipped so it can
    // never be returned as a real allocation.
    for (uint16_t attempts = 0; attempts < kMaxSnapshotStreams; ++attempts) {
      const bool isReserved = (streamId == kInvalidStreamID);
      const bool isInUse =
          std::find_if(mSnapshotStreams.begin(), mSnapshotStreams.end(),
                       [streamId](const SnapshotStream &s) {
                         return s.snapshotStreamParams.snapshotStreamID ==
                                streamId;
                       }) != mSnapshotStreams.end();
      if (!isReserved && !isInUse) {
        break;
      }
      if (attempts == kMaxSnapshotStreams - 1) {
        ChipLogError(Camera, "No available slot for stream allocation");
        return false;
      }
      streamId = static_cast<uint16_t>(
          (streamId + 1) % kMaxSnapshotStreams); // Wraps to 0 after max-1
    }
    outStreamID = streamId;
  } else {
    // Sanity check: a caller-provided ID must not already be in the list.
    auto found = std::find_if(
        mSnapshotStreams.begin(), mSnapshotStreams.end(),
        [outStreamID](const SnapshotStream &s) {
          return s.snapshotStreamParams.snapshotStreamID == outStreamID;
        });
    if (found == mSnapshotStreams.end()) {
      streamId = outStreamID;
    } else {
      ChipLogError(
          Camera,
          "StreamID %d already exists in the available snapshot stream list",
          outStreamID);
      return false;
    }
  }
  SnapshotStream snapshotStream = {
      {
          streamId, snapshotStreamAllocateArgs.imageCodec,
          snapshotStreamAllocateArgs.maxFrameRate,
          snapshotStreamAllocateArgs.minResolution,
          snapshotStreamAllocateArgs.maxResolution,
          snapshotStreamAllocateArgs.quality, 0 /* RefCount */
      },
      false,
      nullptr};
  mSnapshotStreams.push_back(snapshotStream);
  return true;
}
// WebRTC Transport Provider cluster delegate, served by the provider manager.
WebRTCTransportProvider::Delegate &CameraDevice::GetWebRTCProviderDelegate() {
  return mWebRTCProviderManager;
}
// WebRTC Transport Provider controller, also served by the provider manager.
WebRTCTransportProvider::WebRTCTransportProviderController &
CameraDevice::GetWebRTCProviderController() {
  return mWebRTCProviderManager;
}
// Camera AV Stream Management cluster delegate, served by the AV manager.
CameraAVStreamMgmtDelegate &CameraDevice::GetCameraAVStreamMgmtDelegate() {
  return mCameraAVStreamManager;
}
// Camera AV Stream Management controller, also served by the AV manager.
CameraAVStreamController &CameraDevice::GetCameraAVStreamMgmtController() {
  return mCameraAVStreamManager;
}
+342
View File
@@ -0,0 +1,342 @@
/*
*
* Copyright (c) 2025 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "camera-av-stream-manager.h"
#include "camera-device-interface.h"
#include "webrtc-provider-manager.h"
#include <protocols/interaction_model/StatusCode.h>
// Camera Constraints set to typical values.
// TODO: Look into ways to fetch from hardware, if required/possible.
static constexpr uint32_t kMaxContentBufferSizeBytes = 4096;
static constexpr uint32_t kMaxNetworkBandwidthbps = 128000000; // 128 Mbps
static constexpr uint8_t kMaxConcurrentEncoders = 1;
static constexpr uint8_t kSpeakerMinLevel = 1;
static constexpr uint8_t kSpeakerMaxLevel = 254;      // Spec constraint
static constexpr uint8_t kSpeakerMaxChannelCount = 8; // Same as Microphone
static constexpr uint32_t kMaxEncodedPixelRate =
    248832000; // 1080p at 120fps(1920 * 1080 * 120)
static constexpr uint8_t kMicrophoneMinLevel = 1;
static constexpr uint8_t kMicrophoneMaxLevel = 254; // Spec constraint
static constexpr uint8_t kMicrophoneMaxChannelCount =
    8; // Spec Constraint in AudioStreamAllocate
static constexpr uint16_t kMinResolutionWidth = 640;   // Low SD resolution
static constexpr uint16_t kMinResolutionHeight = 360;  // Low SD resolution
static constexpr uint16_t k720pResolutionWidth = 1280; // 720p resolution
static constexpr uint16_t k720pResolutionHeight = 720; // 720p resolution
static constexpr uint16_t kMaxResolutionWidth = 1920;  // 1080p resolution
static constexpr uint16_t kMaxResolutionHeight = 1080; // 1080p resolution
static constexpr uint16_t kSnapshotStreamFrameRate = 30;
static constexpr uint16_t kMaxVideoFrameRate = 120;
static constexpr uint16_t k60fpsVideoFrameRate = 60;
static constexpr uint16_t kMinVideoFrameRate = 30;
static constexpr uint32_t kMinBitRateBps = 10000;   // 10 kbps
static constexpr uint32_t kMaxBitRateBps = 2000000; // 2 mbps
static constexpr uint32_t kKeyFrameIntervalMsec =
    4000; // 4 sec; recommendation from Spec
static constexpr uint16_t kVideoSensorWidthPixels = 1920;  // 1080p resolution
static constexpr uint16_t kVideoSensorHeightPixels = 1080; // 1080p resolution
static constexpr uint16_t kMinImageRotation = 0;
static constexpr uint16_t kMaxImageRotation = 359;  // Spec constraint
static constexpr uint8_t kMaxZones = 10;            // Spec has min 1
static constexpr uint8_t kMaxUserDefinedZones = 10; // Spec has min 5
static constexpr uint8_t kSensitivityMax = 10;      // Spec has 2 to 10
// StreamIDs typically start from 0 and monotonically increase. Setting
// Invalid value to a large and practically unused value.
// NOTE(review): the snapshot stream-ID generator wraps at 65535; confirm
// generated IDs can never collide with this sentinel value.
static constexpr uint16_t kInvalidStreamID = 65500;
// Sentinel level returned by the speaker min/max level getters; presumably
// means "no speaker present" (HasSpeaker() returns false) — confirm.
#define INVALID_SPKR_LEVEL (0)
namespace Camera {

// Reference camera device for the Matter camera example. One object
// implements both CameraDeviceInterface (the cluster-facing facade) and its
// nested CameraHALInterface (the hardware abstraction), backing the WebRTC
// Transport Provider and Camera AV Stream Management clusters.
class CameraDevice : public CameraDeviceInterface,
                     public CameraDeviceInterface::CameraHALInterface {
public:
  // Cluster delegate/controller accessors (CameraDeviceInterface).
  chip::app::Clusters::WebRTCTransportProvider::Delegate &
  GetWebRTCProviderDelegate() override;
  chip::app::Clusters::WebRTCTransportProvider::
      WebRTCTransportProviderController &
      GetWebRTCProviderController() override;
  chip::app::Clusters::CameraAvStreamManagement::CameraAVStreamMgmtDelegate &
  GetCameraAVStreamMgmtDelegate() override;
  chip::app::Clusters::CameraAvStreamManagement::CameraAVStreamController &
  GetCameraAVStreamMgmtController() override;
  CameraDevice();
  ~CameraDevice();
  // This device is its own HAL implementation.
  CameraDeviceInterface::CameraHALInterface &GetCameraHALInterface() override {
    return *this;
  }
  void Init();
  // HAL interface impl
  CameraError InitializeCameraDevice() override;
  CameraError InitializeStreams() override;
  CameraError
  CaptureSnapshot(const chip::app::DataModel::Nullable<uint16_t> streamID,
                  const VideoResolutionStruct &resolution,
                  ImageSnapshot &outImageSnapshot) override;
  // Allocate snapshot stream
  CameraError AllocateSnapshotStream(
      const chip::app::Clusters::CameraAvStreamManagement::
          CameraAVStreamMgmtDelegate::SnapshotStreamAllocateArgs &args,
      uint16_t &outStreamID) override;
  // Static device capability getters (values come from the constants above).
  uint8_t GetMaxConcurrentEncoders() override;
  uint32_t GetMaxEncodedPixelRate() override;
  VideoSensorParamsStruct &GetVideoSensorParams() override;
  bool GetCameraSupportsHDR() override;
  bool GetCameraSupportsNightVision() override;
  bool GetNightVisionUsesInfrared() override;
  bool GetCameraSupportsWatermark() override;
  bool GetCameraSupportsOSD() override;
  bool GetCameraSupportsSoftPrivacy() override;
  bool GetCameraSupportsImageControl() override;
  VideoResolutionStruct &GetMinViewport() override;
  std::vector<RateDistortionTradeOffStruct> &
  GetRateDistortionTradeOffPoints() override;
  uint32_t GetMaxContentBufferSize() override;
  AudioCapabilitiesStruct &GetMicrophoneCapabilities() override;
  AudioCapabilitiesStruct &GetSpeakerCapabilities() override;
  std::vector<SnapshotCapabilitiesStruct> &GetSnapshotCapabilities() override;
  uint32_t GetMaxNetworkBandwidth() override;
  uint16_t GetCurrentFrameRate() override;
  CameraError SetHDRMode(bool hdrMode) override;
  bool GetHDRMode() override { return mHDREnabled; }
  CameraError SetHardPrivacyMode(bool hardPrivacyMode) override;
  bool GetHardPrivacyMode() override { return mHardPrivacyModeOn; }
  CameraError
  SetNightVision(chip::app::Clusters::CameraAvStreamManagement::TriStateAutoEnum
                     nightVision) override;
  chip::app::Clusters::CameraAvStreamManagement::TriStateAutoEnum
  GetNightVision() override {
    return mNightVision;
  }
  std::vector<StreamUsageEnum> &GetSupportedStreamUsages() override;
  std::vector<StreamUsageEnum> &GetStreamUsagePriorities() override {
    return mStreamUsagePriorities;
  }
  CameraError SetStreamUsagePriorities(
      std::vector<StreamUsageEnum> streamUsagePriorities) override;
  // Sets the Default Camera Viewport
  CameraError
  SetViewport(const chip::app::Clusters::Globals::Structs::ViewportStruct::Type
                  &viewPort) override;
  const chip::app::Clusters::Globals::Structs::ViewportStruct::Type &
  GetViewport() override {
    return mViewport;
  }
  /**
   * Sets the Viewport for a specific stream. The implementation of this HAL
   * API is responsible for updating the stream identified with the provided
   * viewport. The invoker of this API shall have already ensured that the
   * provided viewport conforms to the specification requirements on size and
   * aspect ratio.
   *
   * @param stream the currently allocated video stream on which the viewport
   * is being set
   * @param viewport the viewport to be set on the stream
   */
  CameraError
  SetViewport(VideoStream &stream,
              const chip::app::Clusters::Globals::Structs::ViewportStruct::Type
                  &viewport) override;
  // Get/Set SoftRecordingPrivacyMode.
  CameraError
  SetSoftRecordingPrivacyModeEnabled(bool softRecordingPrivacyMode) override;
  bool GetSoftRecordingPrivacyModeEnabled() override {
    return mSoftRecordingPrivacyModeEnabled;
  }
  // Get/Set SoftLivestreamPrivacyMode.
  CameraError
  SetSoftLivestreamPrivacyModeEnabled(bool softLivestreamPrivacyMode) override;
  bool GetSoftLivestreamPrivacyModeEnabled() override {
    return mSoftLivestreamPrivacyModeEnabled;
  }
  // Currently, defaulting to not supporting hard privacy switch.
  bool HasHardPrivacySwitch() override { return false; }
  // Currently, defaulting to not supporting speaker.
  bool HasSpeaker() override { return false; }
  // Mute/Unmute speaker.
  CameraError SetSpeakerMuted(bool muteSpeaker) override;
  bool GetSpeakerMuted() override { return mSpeakerMuted; }
  // Get/Set speaker volume level.
  CameraError SetSpeakerVolume(uint8_t speakerVol) override;
  uint8_t GetSpeakerVolume() override { return mSpeakerVol; }
  // Get the speaker max and min levels. Returns the INVALID_SPKR_LEVEL
  // sentinel since this device reports no speaker (HasSpeaker() == false).
  uint8_t GetSpeakerMaxLevel() override { return INVALID_SPKR_LEVEL; }
  uint8_t GetSpeakerMinLevel() override { return INVALID_SPKR_LEVEL; }
  // Does camera have a microphone
  bool HasMicrophone() override { return true; }
  // Mute/Unmute microphone.
  CameraError SetMicrophoneMuted(bool muteMicrophone) override;
  bool GetMicrophoneMuted() override { return mMicrophoneMuted; }
  // Set microphone volume level.
  CameraError SetMicrophoneVolume(uint8_t microphoneVol) override;
  uint8_t GetMicrophoneVolume() override { return mMicrophoneVol; }
  // Get the microphone max and min levels.
  uint8_t GetMicrophoneMaxLevel() override { return mMicrophoneMaxLevel; }
  uint8_t GetMicrophoneMinLevel() override { return mMicrophoneMinLevel; }
  // Get/Set image control attributes
  CameraError SetImageRotation(uint16_t imageRotation) override;
  uint16_t GetImageRotation() override { return mImageRotation; }
  CameraError SetImageFlipHorizontal(bool imageFlipHorizontal) override;
  bool GetImageFlipHorizontal() override { return mImageFlipHorizontal; }
  CameraError SetImageFlipVertical(bool imageFlipVertical) override;
  bool GetImageFlipVertical() override { return mImageFlipVertical; }
  // Does camera have local storage
  bool HasLocalStorage() override { return false; }
  // Set/Get LocalVideoRecordingEnabled
  CameraError
  SetLocalVideoRecordingEnabled(bool localVideoRecordingEnabled) override;
  bool GetLocalVideoRecordingEnabled() override {
    return mLocalVideoRecordingEnabled;
  }
  // Set/Get LocalSnapshotRecordingEnabled
  CameraError
  SetLocalSnapshotRecordingEnabled(bool localSnapshotRecordingEnabled) override;
  bool GetLocalSnapshotRecordingEnabled() override {
    return mLocalSnapshotRecordingEnabled;
  }
  // Does camera have a status light
  bool HasStatusLight() override { return true; }
  // Set/Get StatusLightEnabled
  CameraError SetStatusLightEnabled(bool statusLightEnabled) override;
  bool GetStatusLightEnabled() override { return mStatusLightEnabled; }
  // Available (not necessarily allocated) stream profiles of this camera.
  std::vector<VideoStream> &GetAvailableVideoStreams() override {
    return mVideoStreams;
  }
  std::vector<AudioStream> &GetAvailableAudioStreams() override {
    return mAudioStreams;
  }
  std::vector<SnapshotStream> &GetAvailableSnapshotStreams() override {
    return mSnapshotStreams;
  }

private:
  std::vector<VideoStream>
      mVideoStreams; // Vector to hold available video streams
  std::vector<AudioStream>
      mAudioStreams; // Vector to hold available audio streams
  std::vector<SnapshotStream>
      mSnapshotStreams; // Vector to hold available snapshot streams
  // Populate the available-stream vectors above with fixed profiles.
  void InitializeVideoStreams();
  void InitializeAudioStreams();
  void InitializeSnapshotStreams();
  bool AddSnapshotStream(const chip::app::Clusters::CameraAvStreamManagement::
                             CameraAVStreamManager::SnapshotStreamAllocateArgs
                                 &snapshotStreamAllocateArgs,
                         uint16_t &outStreamID);
  bool MatchClosestSnapshotParams(
      const VideoResolutionStruct &requested,
      VideoResolutionStruct &outResolution,
      chip::app::Clusters::CameraAvStreamManagement::ImageCodecEnum &outCodec);
  // Various cluster server delegates
  chip::app::Clusters::WebRTCTransportProvider::WebRTCProviderManager
      mWebRTCProviderManager;
  chip::app::Clusters::CameraAvStreamManagement::CameraAVStreamManager
      mCameraAVStreamManager;
  // Use a standard 1080p aspect ratio
  chip::app::Clusters::Globals::Structs::ViewportStruct::Type mViewport = {
      0, 0, 1920, 1080};
  uint16_t mCurrentVideoFrameRate = kMinVideoFrameRate;
  bool mHDREnabled = false;
  bool mSpeakerMuted = false;
  bool mMicrophoneMuted = false;
  bool mHardPrivacyModeOn = false;
  chip::app::Clusters::CameraAvStreamManagement::TriStateAutoEnum mNightVision =
      chip::app::Clusters::CameraAvStreamManagement::TriStateAutoEnum::kOff;
  bool mSoftRecordingPrivacyModeEnabled = false;
  bool mSoftLivestreamPrivacyModeEnabled = false;
  uint8_t mSpeakerVol = kSpeakerMinLevel;
  uint8_t mSpeakerMinLevel = kSpeakerMinLevel;
  uint8_t mSpeakerMaxLevel = kSpeakerMaxLevel;
  uint8_t mMicrophoneVol = kMicrophoneMinLevel;
  uint8_t mMicrophoneMinLevel = kMicrophoneMinLevel;
  uint8_t mMicrophoneMaxLevel = kMicrophoneMaxLevel;
  bool mLocalVideoRecordingEnabled = false;
  bool mLocalSnapshotRecordingEnabled = false;
  bool mStatusLightEnabled = false;
  uint16_t mImageRotation = kMinImageRotation;
  bool mImageFlipHorizontal = false;
  bool mImageFlipVertical = false;
  uint8_t mDetectionSensitivity =
      (1 + kSensitivityMax) / 2; // Average over the range
  std::vector<StreamUsageEnum> mStreamUsagePriorities = {
      StreamUsageEnum::kLiveView, StreamUsageEnum::kRecording};
};
} // namespace Camera
@@ -0,0 +1,881 @@
#include <app-common/zap-generated/attributes/Accessors.h>
#include <app-common/zap-generated/ids/Attributes.h>
#include <app-common/zap-generated/ids/Clusters.h>
#include <camera-av-stream-manager.h>
#include <fstream>
#include <iostream>
#include <lib/support/logging/CHIPLogging.h>
#include <set>
using namespace chip;
using namespace chip::app;
using namespace chip::app::DataModel;
using namespace chip::app::Clusters;
using namespace chip::app::Clusters::CameraAvStreamManagement;
using namespace chip::app::Clusters::CameraAvStreamManagement::Attributes;
using chip::Protocols::InteractionModel::Status;
namespace {
// Constants
// Sentinel meaning "no stream ID assigned"; same value as the HAL-side
// kInvalidStreamID in camera-device.h.
constexpr uint16_t kInvalidStreamID = 65500;
} // namespace
// Wire up the camera HAL that this manager's stream operations will use.
void CameraAVStreamManager::SetCameraDeviceHAL(
    CameraDeviceInterface *aCameraDeviceHAL) {
  mCameraDeviceHAL = aCameraDeviceHAL;
}
// Validates that the requested stream usage is supported by the camera and,
// when the caller passed Null stream IDs, resolves them against the
// allocated streams. An allocated stream whose usage equals the request is
// an "exact" match and preferred; otherwise any allocated stream is
// remembered as a "loose" match and used as a fallback.
//
// Fix: the loose stream-ID locals were previously uninitialized; they are
// only read when the corresponding looselyMatched* flag is set, but they are
// now zero-initialized to keep the logic robust and warning-free.
//
// Returns CHIP_ERROR_NOT_FOUND when the usage itself is unsupported,
// CHIP_NO_ERROR otherwise (with Null IDs populated where a match exists).
CHIP_ERROR CameraAVStreamManager::ValidateStreamUsage(
    StreamUsageEnum streamUsage,
    Optional<DataModel::Nullable<uint16_t>> &videoStreamId,
    Optional<DataModel::Nullable<uint16_t>> &audioStreamId) {
  // The server ensures that at least one stream Id has a value, and that there
  // are streams allocated. If a stream id(s) are provided, it's sufficient to
  // have verified that the provided usage is supported by the camera. If
  // they're Null, look for a stream ID that matches the usage. A match does
  // not need to be exact.
  bool exactlyMatchedVideoStream = false;
  bool looselyMatchedVideoStream = false;
  uint16_t looseVideoStreamID = 0; // Valid only if looselyMatchedVideoStream
  bool exactlyMatchedAudioStream = false;
  bool looselyMatchedAudioStream = false;
  uint16_t looseAudioStreamID = 0; // Valid only if looselyMatchedAudioStream
  // Is the requested stream usage supported by the camera?
  auto myStreamUsages =
      GetCameraAVStreamMgmtServer()->GetSupportedStreamUsages();
  auto it =
      std::find(myStreamUsages.begin(), myStreamUsages.end(), streamUsage);
  if (it == myStreamUsages.end()) {
    ChipLogError(Camera,
                 "Requested stream usage not found in supported stream usages");
    return CHIP_ERROR_NOT_FOUND;
  }
  if (videoStreamId.HasValue()) {
    const std::vector<VideoStreamStruct> &allocatedVideoStreams =
        GetCameraAVStreamMgmtServer()->GetAllocatedVideoStreams();
    // If no Video ID is provided, match to an allocated ID. Exact is preferred
    // if found. We know the stream requested is in supported streams.
    if (videoStreamId.Value().IsNull()) {
      for (const auto &stream : allocatedVideoStreams) {
        if (stream.streamUsage == streamUsage) {
          videoStreamId.Emplace(stream.videoStreamID);
          exactlyMatchedVideoStream = true;
          break;
        }
        looselyMatchedVideoStream = true;
        looseVideoStreamID = stream.videoStreamID;
      }
    } else {
      // We've been provided with a stream ID, and we know the stream usage is
      // supported by the camera, classify as an exact match
      exactlyMatchedVideoStream = true;
    }
  }
  if (audioStreamId.HasValue()) {
    const std::vector<AudioStreamStruct> &allocatedAudioStreams =
        GetCameraAVStreamMgmtServer()->GetAllocatedAudioStreams();
    // If no Audio ID is provided, match to an allocated ID. Exact is preferred
    // if found. We know the stream requested is in supported streams.
    if (audioStreamId.Value().IsNull()) {
      for (const auto &stream : allocatedAudioStreams) {
        if (stream.streamUsage == streamUsage) {
          audioStreamId.Emplace(stream.audioStreamID);
          exactlyMatchedAudioStream = true;
          break;
        }
        looselyMatchedAudioStream = true;
        looseAudioStreamID = stream.audioStreamID;
      }
    } else {
      // We've been provided with a stream ID, and we know the stream usage is
      // supported by the camera, classify as an exact match
      exactlyMatchedAudioStream = true;
    }
  }
  // If we have a loose match and no exact match, update the provided stream
  // IDs with the loose match values
  if (looselyMatchedAudioStream && !exactlyMatchedAudioStream) {
    audioStreamId.Emplace(looseAudioStreamID);
  }
  if (looselyMatchedVideoStream && !exactlyMatchedVideoStream) {
    videoStreamId.Emplace(looseVideoStreamID);
  }
  return CHIP_NO_ERROR;
}
// Expose the server's list of currently allocated video streams.
const std::vector<
    chip::app::Clusters::CameraAvStreamManagement::VideoStreamStruct> &
CameraAVStreamManager::GetAllocatedVideoStreams() const {
  return GetCameraAVStreamMgmtServer()->GetAllocatedVideoStreams();
}
// Expose the server's list of currently allocated audio streams.
const std::vector<
    chip::app::Clusters::CameraAvStreamManagement::AudioStreamStruct> &
CameraAVStreamManager::GetAllocatedAudioStreams() const {
  return GetCameraAVStreamMgmtServer()->GetAllocatedAudioStreams();
}
void CameraAVStreamManager::GetBandwidthForStreams(
const Optional<DataModel::Nullable<uint16_t>> &videoStreamId,
const Optional<DataModel::Nullable<uint16_t>> &audioStreamId,
uint32_t &outBandwidthbps) {
outBandwidthbps = 0;
if (videoStreamId.HasValue() && !videoStreamId.Value().IsNull()) {
uint16_t vStreamId = videoStreamId.Value().Value();
auto &allocatedVideoStreams =
GetCameraAVStreamMgmtServer()->GetAllocatedVideoStreams();
for (const chip::app::Clusters::CameraAvStreamManagement::Structs::
VideoStreamStruct::Type &stream : allocatedVideoStreams) {
if (stream.videoStreamID == vStreamId) {
outBandwidthbps += stream.maxBitRate;
ChipLogProgress(
Camera,
"GetBandwidthForStreams: VideoStream %u maxBitRate: %lu bps",
vStreamId, stream.maxBitRate);
break;
}
}
}
if (audioStreamId.HasValue() && !audioStreamId.Value().IsNull()) {
uint16_t aStreamId = audioStreamId.Value().Value();
auto &allocatedAudioStreams =
GetCameraAVStreamMgmtServer()->GetAllocatedAudioStreams();
for (const chip::app::Clusters::CameraAvStreamManagement::Structs::
AudioStreamStruct::Type &stream : allocatedAudioStreams) {
if (stream.audioStreamID == aStreamId) {
outBandwidthbps += stream.bitRate;
ChipLogProgress(
Camera, "GetBandwidthForStreams: AudioStream %u bitRate: %lu bps",
aStreamId, stream.bitRate);
break;
}
}
}
return;
}
// Checks that videoStreamId refers to a currently allocated video stream.
// Returns CHIP_NO_ERROR when found, CHIP_ERROR_INVALID_ARGUMENT otherwise.
CHIP_ERROR
CameraAVStreamManager::ValidateVideoStreamID(uint16_t videoStreamId) {
  for (const auto &allocated :
       GetCameraAVStreamMgmtServer()->GetAllocatedVideoStreams()) {
    if (allocated.videoStreamID == videoStreamId) {
      ChipLogProgress(Camera, "Video stream ID %u is valid and allocated",
                      videoStreamId);
      return CHIP_NO_ERROR;
    }
  }
  ChipLogError(Camera,
               "Video stream ID %u not found in allocated video streams",
               videoStreamId);
  return CHIP_ERROR_INVALID_ARGUMENT;
}
// Checks that audioStreamId refers to a currently allocated audio stream.
// Returns CHIP_NO_ERROR when found, CHIP_ERROR_INVALID_ARGUMENT otherwise.
CHIP_ERROR
CameraAVStreamManager::ValidateAudioStreamID(uint16_t audioStreamId) {
  for (const auto &allocated :
       GetCameraAVStreamMgmtServer()->GetAllocatedAudioStreams()) {
    if (allocated.audioStreamID == audioStreamId) {
      ChipLogProgress(Camera, "Audio stream ID %u is valid and allocated",
                      audioStreamId);
      return CHIP_NO_ERROR;
    }
  }
  ChipLogError(Camera,
               "Audio stream ID %u not found in allocated audio streams",
               audioStreamId);
  return CHIP_ERROR_INVALID_ARGUMENT;
}
// Reports whether the hard privacy mode attribute is currently on.
CHIP_ERROR CameraAVStreamManager::IsHardPrivacyModeActive(bool &isActive) {
  isActive = GetCameraAVStreamMgmtServer()->GetHardPrivacyModeOn();
  return CHIP_NO_ERROR;
}
// Reports whether the soft recording privacy mode attribute is enabled.
CHIP_ERROR
CameraAVStreamManager::IsSoftRecordingPrivacyModeActive(bool &isActive) {
  isActive =
      GetCameraAVStreamMgmtServer()->GetSoftRecordingPrivacyModeEnabled();
  return CHIP_NO_ERROR;
}
// Reports whether the soft livestream privacy mode attribute is enabled.
CHIP_ERROR
CameraAVStreamManager::IsSoftLivestreamPrivacyModeActive(bool &isActive) {
  isActive =
      GetCameraAVStreamMgmtServer()->GetSoftLivestreamPrivacyModeEnabled();
  return CHIP_NO_ERROR;
}
// Forwards the hard privacy mode setting to the cluster server attribute.
CHIP_ERROR CameraAVStreamManager::SetHardPrivacyModeOn(bool hardPrivacyMode) {
  return GetCameraAVStreamMgmtServer()->SetHardPrivacyModeOn(hardPrivacyMode);
}
bool CameraAVStreamManager::HasAllocatedVideoStreams() {
const std::vector<VideoStreamStruct> &allocatedVideoStreams =
GetCameraAVStreamMgmtServer()->GetAllocatedVideoStreams();
return !allocatedVideoStreams.empty();
}
bool CameraAVStreamManager::HasAllocatedAudioStreams() {
const std::vector<AudioStreamStruct> &allocatedAudioStreams =
GetCameraAVStreamMgmtServer()->GetAllocatedAudioStreams();
return !allocatedAudioStreams.empty();
}
// Attempts to satisfy a VideoStreamAllocate request:
//  1) Reject with DynamicConstraintError if no available HAL stream profile
//     is compatible with the request at all.
//  2) Prefer reusing an already-allocated server stream with matching
//     parameters (no new resources consumed).
//  3) Otherwise claim the first unallocated compatible HAL stream, after
//     verifying encoder/pixel-rate resource availability.
// On Success, outStreamID holds the chosen stream's ID; otherwise it stays
// at kInvalidStreamID.
Protocols::InteractionModel::Status CameraAVStreamManager::VideoStreamAllocate(
    const VideoStreamStruct &allocateArgs, uint16_t &outStreamID) {
  outStreamID = kInvalidStreamID;
  bool isRequestSupportedByAnyAvailableStream = false;
  // Check if allocation request can be supported
  for (const auto &stream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams()) {
    if (stream.IsCompatible(allocateArgs)) {
      isRequestSupportedByAnyAvailableStream = true;
      break;
    }
  }
  if (!isRequestSupportedByAnyAvailableStream) {
    return Status::DynamicConstraintError;
  }
  // Try to reuse an allocated stream
  std::optional<uint16_t> reusableStreamId =
      GetCameraAVStreamMgmtServer()->GetReusableVideoStreamId(allocateArgs);
  if (reusableStreamId.has_value()) {
    // Found a stream that can be reused
    outStreamID = reusableStreamId.value();
    ChipLogProgress(Camera, "Matching pre-allocated stream with ID: %d exists",
                    outStreamID);
    return Status::Success;
  }
  // Try to find an unused compatible available stream
  for (auto &stream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams()) {
    if (!stream.isAllocated && stream.IsCompatible(allocateArgs)) {
      // Worst-case encoded throughput for this request: fps * height * width.
      uint32_t candidateEncodedPixelRate = allocateArgs.maxFrameRate *
                                           allocateArgs.maxResolution.height *
                                           allocateArgs.maxResolution.width;
      bool encoderRequired = true;
      if (!GetCameraAVStreamMgmtServer()
               ->IsResourceAvailableForStreamAllocation(
                   candidateEncodedPixelRate, encoderRequired)) {
        return Status::ResourceExhausted;
      }
      stream.isAllocated = true;
      outStreamID = stream.videoStreamParams.videoStreamID;
      // Set the default viewport on the newly allocated stream
      mCameraDeviceHAL->GetCameraHALInterface().SetViewport(
          stream, mCameraDeviceHAL->GetCameraHALInterface().GetViewport());
      // Set the current frame rate attribute from HAL
      GetCameraAVStreamMgmtServer()->SetCurrentFrameRate(
          mCameraDeviceHAL->GetCameraHALInterface().GetCurrentFrameRate());
      return Status::Success;
    }
  }
  // No compatible stream available for use.
  return Status::ResourceExhausted;
}
// Post-allocation hook from the AV Stream Management server. Reacts per
// allocation action; actually starting the hardware pipeline is stubbed out
// in this example (see the commented-out StartVideoStream call).
void CameraAVStreamManager::OnVideoStreamAllocated(
    const VideoStreamStruct &allocatedStream, StreamAllocationAction action) {
  switch (action) {
  case StreamAllocationAction::kNewAllocation:
    ChipLogProgress(Camera, "Starting new video stream with ID: %u",
                    allocatedStream.videoStreamID);
    // mCameraDeviceHAL->GetCameraHALInterface().StartVideoStream(allocatedStream);
    // Set the current frame rate attribute from HAL once stream has started
    GetCameraAVStreamMgmtServer()->SetCurrentFrameRate(
        mCameraDeviceHAL->GetCameraHALInterface().GetCurrentFrameRate());
    break;
  case StreamAllocationAction::kModification:
    // Find the stream and restart it with new parameters
    for (VideoStream &stream :
         mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams()) {
      if (stream.videoStreamParams.videoStreamID ==
              allocatedStream.videoStreamID &&
          stream.isAllocated) {
        // For modifications, we always stop and restart the stream to ensure
        // new parameters are applied
        ChipLogProgress(
            Camera, "Restarting video stream with ID: %u due to modifications",
            allocatedStream.videoStreamID);
        break;
      }
    }
    break;
  case StreamAllocationAction::kReuse:
    ChipLogProgress(Camera,
                    "Reusing existing video stream with ID: %u without changes",
                    allocatedStream.videoStreamID);
    break;
  }
}
// Applies optional watermark/OSD changes to an already-allocated video
// stream.
// @param streamID         ID of the allocated video stream to modify.
// @param waterMarkEnabled New watermark setting; left unchanged when absent.
// @param osdEnabled       New OSD setting; left unchanged when absent.
// @return Success when the stream was found and updated, NotFound otherwise.
Protocols::InteractionModel::Status CameraAVStreamManager::VideoStreamModify(
    const uint16_t streamID, const chip::Optional<bool> waterMarkEnabled,
    const chip::Optional<bool> osdEnabled) {
  for (VideoStream &stream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams()) {
    if (stream.videoStreamParams.videoStreamID == streamID &&
        stream.isAllocated) {
      // TODO: Link with HAL APIs to return error
      if (waterMarkEnabled.HasValue()) {
        stream.videoStreamParams.watermarkEnabled = waterMarkEnabled;
      }
      if (osdEnabled.HasValue()) {
        stream.videoStreamParams.OSDEnabled = osdEnabled;
      }
      // Fix: this is a success path — log at Progress level; the original
      // used ChipLogError for a non-error message.
      ChipLogProgress(Camera, "Modified video stream with ID: %d", streamID);
      return Status::Success;
    }
  }
  ChipLogError(Camera, "Allocated video stream with ID: %d not found",
               streamID);
  return Status::NotFound;
}
// Releases the allocated video stream matching streamID in the HAL.
// @return Success when the stream was found and released; NotFound (with an
//         error log) when no allocated stream carries that ID.
Protocols::InteractionModel::Status
CameraAVStreamManager::VideoStreamDeallocate(const uint16_t streamID) {
  auto &availableStreams =
      mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams();
  for (VideoStream &candidate : availableStreams) {
    if (candidate.videoStreamParams.videoStreamID != streamID ||
        !candidate.isAllocated) {
      continue;
    }
    candidate.isAllocated = false;
    return Status::Success;
  }
  ChipLogError(Camera,
               "Allocated video stream with ID: %d not found internally",
               streamID);
  return Status::NotFound;
}
// Allocates (or reuses) an audio stream compatible with allocateArgs.
// @param allocateArgs Requested audio stream parameters.
// @param outStreamID  Receives the chosen stream's ID; kInvalidStreamID when
//                     no compatible stream exists.
// @return Success when a compatible stream was found (fresh or reused),
//         DynamicConstraintError otherwise.
Protocols::InteractionModel::Status CameraAVStreamManager::AudioStreamAllocate(
    const AudioStreamStruct &allocateArgs, uint16_t &outStreamID) {
  outStreamID = kInvalidStreamID;
  for (AudioStream &candidate :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableAudioStreams()) {
    if (!candidate.IsCompatible(allocateArgs)) {
      continue;
    }
    outStreamID = candidate.audioStreamParams.audioStreamID;
    if (candidate.isAllocated) {
      // Already allocated: reuse it as-is.
      ChipLogProgress(Camera,
                      "Matching pre-allocated stream with ID: %d exists",
                      outStreamID);
    } else {
      candidate.isAllocated = true;
    }
    return Status::Success;
  }
  return Status::DynamicConstraintError;
}
// Releases the allocated audio stream matching streamID in the HAL.
// @return Success when found and released, NotFound otherwise.
Protocols::InteractionModel::Status
CameraAVStreamManager::AudioStreamDeallocate(const uint16_t streamID) {
  auto &availableStreams =
      mCameraDeviceHAL->GetCameraHALInterface().GetAvailableAudioStreams();
  for (AudioStream &candidate : availableStreams) {
    if (candidate.audioStreamParams.audioStreamID != streamID ||
        !candidate.isAllocated) {
      continue;
    }
    candidate.isAllocated = false;
    return Status::Success;
  }
  ChipLogError(Camera,
               "Allocated audio stream with ID: %d not found internally",
               streamID);
  return Status::NotFound;
}
// Allocates a snapshot stream for allocateArgs, in five stages:
//   1. reject if no available HAL stream could ever satisfy the request;
//   2. reuse a matching already-allocated stream (SDK decision);
//   3. check encoder/pixel-rate budget (only when encoded pixels requested);
//   4. ask the HAL to allocate a brand-new stream;
//   5. fall back to claiming an unused compatible pre-defined stream.
// @param allocateArgs Requested snapshot stream parameters.
// @param outStreamID  Receives the allocated/reused stream ID on Success.
// @return Success, DynamicConstraintError (unsupportable request), or
//         ResourceExhausted (budget exceeded or nothing free).
Protocols::InteractionModel::Status
CameraAVStreamManager::SnapshotStreamAllocate(
    const SnapshotStreamAllocateArgs &allocateArgs, uint16_t &outStreamID) {
  outStreamID = kInvalidStreamID;
  bool isRequestSupportedByAnyAvailableStream = false;
  // Check if allocation request can be supported
  for (const auto &stream : mCameraDeviceHAL->GetCameraHALInterface()
                                .GetAvailableSnapshotStreams()) {
    if (stream.IsCompatible(allocateArgs)) {
      isRequestSupportedByAnyAvailableStream = true;
      break;
    }
  }
  if (!isRequestSupportedByAnyAvailableStream) {
    return Status::DynamicConstraintError;
  }
  // Try to reuse an allocated stream.
  std::optional<uint16_t> reusableStreamId =
      GetCameraAVStreamMgmtServer()->GetReusableSnapshotStreamId(allocateArgs);
  if (reusableStreamId.has_value()) {
    // Found a stream that can be reused
    outStreamID = reusableStreamId.value();
    ChipLogProgress(Camera, "Matching pre-allocated stream with ID: %d exists",
                    outStreamID);
    return Status::Success;
  }
  // Budget check only matters when the request needs encoded pixels; a
  // zero rate with encoderRequired=false is otherwise passed through.
  uint32_t candidateEncodedPixelRate = 0;
  bool encoderRequired = false;
  if (allocateArgs.encodedPixels) {
    candidateEncodedPixelRate += allocateArgs.maxFrameRate *
                                 allocateArgs.maxResolution.height *
                                 allocateArgs.maxResolution.width;
    if (allocateArgs.hardwareEncoder) {
      encoderRequired = true;
    }
  }
  if (!GetCameraAVStreamMgmtServer()->IsResourceAvailableForStreamAllocation(
          candidateEncodedPixelRate, encoderRequired)) {
    return Status::ResourceExhausted;
  }
  // If no pre-allocated stream matches, try allocating a new one.
  if (mCameraDeviceHAL->GetCameraHALInterface().AllocateSnapshotStream(
          allocateArgs, outStreamID) == CameraError::SUCCESS) {
    return Status::Success;
  }
  // Try to find an unused compatible available stream
  for (auto &stream : mCameraDeviceHAL->GetCameraHALInterface()
                          .GetAvailableSnapshotStreams()) {
    if (!stream.isAllocated && stream.IsCompatible(allocateArgs)) {
      stream.isAllocated = true;
      outStreamID = stream.snapshotStreamParams.snapshotStreamID;
      // Set the optional Watermark and OSD values that may have been provided.
      // This is the initial setting of these values, they may be subsequently
      // modified. If the values have no value that is ok, the allocated stream
      // will store as such and ignore.
      stream.snapshotStreamParams.watermarkEnabled =
          allocateArgs.watermarkEnabled;
      stream.snapshotStreamParams.OSDEnabled = allocateArgs.OSDEnabled;
      return Status::Success;
    }
  }
  // No compatible stream available for use.
  return Status::ResourceExhausted;
}
// Applies optional watermark/OSD changes to an already-allocated snapshot
// stream.
// @param streamID         ID of the allocated snapshot stream to modify.
// @param waterMarkEnabled New watermark setting; left unchanged when absent.
// @param osdEnabled       New OSD setting; left unchanged when absent.
// @return Success when the stream was found and updated, NotFound otherwise.
Protocols::InteractionModel::Status CameraAVStreamManager::SnapshotStreamModify(
    const uint16_t streamID, const chip::Optional<bool> waterMarkEnabled,
    const chip::Optional<bool> osdEnabled) {
  for (SnapshotStream &stream : mCameraDeviceHAL->GetCameraHALInterface()
                                    .GetAvailableSnapshotStreams()) {
    if (stream.snapshotStreamParams.snapshotStreamID == streamID &&
        stream.isAllocated) {
      // TODO: Link with HAL APIs to return error
      if (waterMarkEnabled.HasValue()) {
        stream.snapshotStreamParams.watermarkEnabled = waterMarkEnabled;
      }
      if (osdEnabled.HasValue()) {
        stream.snapshotStreamParams.OSDEnabled = osdEnabled;
      }
      // Fix: this is a success path — log at Progress level; the original
      // used ChipLogError for a non-error message.
      ChipLogProgress(Camera, "Modified snapshot stream with ID: %d", streamID);
      return Status::Success;
    }
  }
  ChipLogError(Camera, "Allocated snapshot stream with ID: %d not found",
               streamID);
  return Status::NotFound;
}
// Deallocates the snapshot stream with the given ID, refusing when the
// stream is still referenced by a consumer.
// @return Success, InvalidInState (still in use), or NotFound.
Protocols::InteractionModel::Status
CameraAVStreamManager::SnapshotStreamDeallocate(const uint16_t streamID) {
  auto &snapshotStreams =
      mCameraDeviceHAL->GetCameraHALInterface().GetAvailableSnapshotStreams();
  for (SnapshotStream &candidate : snapshotStreams) {
    if (candidate.snapshotStreamParams.snapshotStreamID != streamID ||
        !candidate.isAllocated) {
      continue;
    }
    if (candidate.snapshotStreamParams.referenceCount > 0) {
      // Still referenced; cannot deallocate yet.
      ChipLogError(Camera, "Snapshot stream with ID: %d still in use",
                   streamID);
      return Status::InvalidInState;
    }
    candidate.isAllocated = false;
    return Status::Success;
  }
  ChipLogError(Camera, "Allocated snapshot stream with ID: %d not found",
               streamID);
  return Status::NotFound;
}
// Propagates the SDK's updated stream usage priority list down to the HAL.
void CameraAVStreamManager::OnStreamUsagePrioritiesChanged() {
  ChipLogProgress(Camera, "Stream usage priorities changed");
  const auto &priorities =
      GetCameraAVStreamMgmtServer()->GetStreamUsagePriorities();
  mCameraDeviceHAL->GetCameraHALInterface().SetStreamUsagePriorities(
      priorities);
}
// Mirrors a changed CameraAVStreamMgmt cluster attribute into the HAL by
// reading the new value from the server and pushing it to the corresponding
// HAL setter. Attributes with no HAL hook fall through to the default log.
void CameraAVStreamManager::OnAttributeChanged(AttributeId attributeId) {
  ChipLogProgress(Camera,
                  "Attribute changed for AttributeId = " ChipLogFormatMEI,
                  ChipLogValueMEI(attributeId));
  switch (attributeId) {
  case HDRModeEnabled::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetHDRMode(
        GetCameraAVStreamMgmtServer()->GetHDRModeEnabled());
    break;
  }
  case SoftRecordingPrivacyModeEnabled::Id: {
    mCameraDeviceHAL->GetCameraHALInterface()
        .SetSoftRecordingPrivacyModeEnabled(
            GetCameraAVStreamMgmtServer()
                ->GetSoftRecordingPrivacyModeEnabled());
    break;
  }
  case SoftLivestreamPrivacyModeEnabled::Id: {
    mCameraDeviceHAL->GetCameraHALInterface()
        .SetSoftLivestreamPrivacyModeEnabled(
            GetCameraAVStreamMgmtServer()
                ->GetSoftLivestreamPrivacyModeEnabled());
    break;
  }
  case NightVision::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetNightVision(
        GetCameraAVStreamMgmtServer()->GetNightVision());
    break;
  }
  case NightVisionIllum::Id: {
    // No HAL action taken for illumination changes here — presumably
    // covered by SetNightVision or not yet implemented; TODO confirm.
    break;
  }
  case Viewport::Id: {
    // Update the device default
    mCameraDeviceHAL->GetCameraHALInterface().SetViewport(
        GetCameraAVStreamMgmtServer()->GetViewport());
    // Update the per stream viewports on the camera
    for (VideoStream &stream :
         mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams()) {
      mCameraDeviceHAL->GetCameraHALInterface().SetViewport(
          stream, GetCameraAVStreamMgmtServer()->GetViewport());
    }
    break;
  }
  case SpeakerMuted::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetSpeakerMuted(
        GetCameraAVStreamMgmtServer()->GetSpeakerMuted());
    break;
  }
  case SpeakerVolumeLevel::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetSpeakerVolume(
        GetCameraAVStreamMgmtServer()->GetSpeakerVolumeLevel());
    break;
  }
  case MicrophoneMuted::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetMicrophoneMuted(
        GetCameraAVStreamMgmtServer()->GetMicrophoneMuted());
    break;
  }
  case MicrophoneVolumeLevel::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetMicrophoneVolume(
        GetCameraAVStreamMgmtServer()->GetMicrophoneVolumeLevel());
    break;
  }
  case LocalVideoRecordingEnabled::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetLocalVideoRecordingEnabled(
        GetCameraAVStreamMgmtServer()->GetLocalVideoRecordingEnabled());
    break;
  }
  case LocalSnapshotRecordingEnabled::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetLocalSnapshotRecordingEnabled(
        GetCameraAVStreamMgmtServer()->GetLocalSnapshotRecordingEnabled());
    break;
  }
  case StatusLightEnabled::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetStatusLightEnabled(
        GetCameraAVStreamMgmtServer()->GetStatusLightEnabled());
    break;
  }
  case ImageRotation::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetImageRotation(
        GetCameraAVStreamMgmtServer()->GetImageRotation());
    break;
  }
  case ImageFlipHorizontal::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetImageFlipHorizontal(
        GetCameraAVStreamMgmtServer()->GetImageFlipHorizontal());
    break;
  }
  case ImageFlipVertical::Id: {
    mCameraDeviceHAL->GetCameraHALInterface().SetImageFlipVertical(
        GetCameraAVStreamMgmtServer()->GetImageFlipVertical());
    break;
  }
  default:
    ChipLogProgress(Camera,
                    "Unknown Attribute with AttributeId = " ChipLogFormatMEI,
                    ChipLogValueMEI(attributeId));
  }
}
// Captures a snapshot through the HAL into outImageSnapshot.
// @param streamID         Snapshot stream to capture from (nullable).
// @param resolution       Requested capture resolution.
// @param outImageSnapshot Receives the captured image on success.
// @return Success when the HAL capture succeeds, Failure otherwise.
Protocols::InteractionModel::Status
CameraAVStreamManager::CaptureSnapshot(const Nullable<uint16_t> streamID,
                                       const VideoResolutionStruct &resolution,
                                       ImageSnapshot &outImageSnapshot) {
  // Fix: removed the unreachable trailing `return Status::Failure;` that
  // followed an if/else in which both branches already returned.
  if (mCameraDeviceHAL->GetCameraHALInterface().CaptureSnapshot(
          streamID, resolution, outImageSnapshot) == CameraError::SUCCESS) {
    return Status::Success;
  }
  return Status::Failure;
}
// Re-marks HAL video streams as allocated based on the video streams the SDK
// restored from persistent storage, and signals each restored stream so it
// can be started.
CHIP_ERROR
CameraAVStreamManager::AllocatedVideoStreamsLoaded() {
  const std::vector<VideoStreamStruct> &persistedStreams =
      GetCameraAVStreamMgmtServer()->GetAllocatedVideoStreams();
  for (auto &halStream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams()) {
    for (const VideoStreamStruct &persisted : persistedStreams) {
      if (persisted.videoStreamID !=
          halStream.videoStreamParams.videoStreamID) {
        continue;
      }
      // Found in persisted streams, mark as allocated in HAL
      halStream.isAllocated = true;
      ChipLogProgress(
          Camera,
          "HAL Video Stream ID %u marked as allocated from persisted state.",
          halStream.videoStreamParams.videoStreamID);
      // Signal for starting the video stream
      OnVideoStreamAllocated(persisted, StreamAllocationAction::kNewAllocation);
      break;
    }
  }
  return CHIP_NO_ERROR;
}
// Re-marks HAL audio streams as allocated based on the audio streams the SDK
// restored from persistent storage.
CHIP_ERROR
CameraAVStreamManager::AllocatedAudioStreamsLoaded() {
  const std::vector<AudioStreamStruct> &persistedStreams =
      GetCameraAVStreamMgmtServer()->GetAllocatedAudioStreams();
  for (auto &halStream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableAudioStreams()) {
    for (const AudioStreamStruct &persisted : persistedStreams) {
      if (persisted.audioStreamID !=
          halStream.audioStreamParams.audioStreamID) {
        continue;
      }
      // Found in persisted streams, mark as allocated in HAL
      halStream.isAllocated = true;
      ChipLogProgress(
          Camera,
          "HAL Audio Stream ID %u marked as allocated from persisted state.",
          halStream.audioStreamParams.audioStreamID);
      break;
    }
  }
  return CHIP_NO_ERROR;
}
// Restoring snapshot streams from persisted state is not supported yet;
// always reports CHIP_ERROR_NOT_IMPLEMENTED.
CHIP_ERROR
CameraAVStreamManager::AllocatedSnapshotStreamsLoaded() {
  const CHIP_ERROR err = CHIP_ERROR_NOT_IMPLEMENTED;
  ChipLogError(
      Camera,
      "Allocated snapshot streams could not be loaded: %" CHIP_ERROR_FORMAT,
      err.Format());
  return err;
}
// Invoked once the SDK has finished loading persisted attributes; rehydrates
// the HAL's allocated video, audio, and snapshot stream state in that order,
// returning the first failure encountered.
CHIP_ERROR
CameraAVStreamManager::PersistentAttributesLoadedCallback() {
  ChipLogDetail(Camera, "Successfully loaded persistent attributes");
  CHIP_ERROR err = AllocatedVideoStreamsLoaded();
  if (err != CHIP_NO_ERROR) {
    ChipLogError(
        Camera,
        "Allocated video streams could not be loaded: %" CHIP_ERROR_FORMAT,
        err.Format());
    return err;
  }
  err = AllocatedAudioStreamsLoaded();
  if (err != CHIP_NO_ERROR) {
    ChipLogError(
        Camera,
        "Allocated audio streams could not be loaded: %" CHIP_ERROR_FORMAT,
        err.Format());
    return err;
  }
  err = AllocatedSnapshotStreamsLoaded();
  if (err != CHIP_NO_ERROR) {
    ChipLogError(
        Camera,
        "Allocated snapshot streams could not be loaded: %" CHIP_ERROR_FORMAT,
        err.Format());
  }
  return err;
}
// Called when a transport (e.g. a WebRTC session) acquires an audio+video
// stream pair: increments the per-stream reference counts both in the HAL's
// stream records and in the SDK's allocated-stream attributes. Always
// returns CHIP_NO_ERROR; SDK-side count failures are only logged.
CHIP_ERROR
CameraAVStreamManager::OnTransportAcquireAudioVideoStreams(
    uint16_t audioStreamID, uint16_t videoStreamID) {
  // Update the available audio stream in the HAL
  // NOTE(review): the loop scans all streams without breaking on a match —
  // presumably stream IDs are unique; confirm.
  for (AudioStream &stream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableAudioStreams()) {
    if (stream.audioStreamParams.audioStreamID == audioStreamID &&
        stream.isAllocated) {
      // Saturate at UINT8_MAX instead of wrapping the uint8 counter.
      if (stream.audioStreamParams.referenceCount < UINT8_MAX) {
        stream.audioStreamParams.referenceCount++;
      } else {
        ChipLogError(
            Camera,
            "Attempted to increment audio stream %u ref count beyond max limit",
            audioStreamID);
      }
    }
  }
  // Update the available video stream in the HAL
  for (VideoStream &stream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams()) {
    if (stream.videoStreamParams.videoStreamID == videoStreamID &&
        stream.isAllocated) {
      if (stream.videoStreamParams.referenceCount < UINT8_MAX) {
        stream.videoStreamParams.referenceCount++;
      } else {
        ChipLogError(
            Camera,
            "Attempted to increment video stream %u ref count beyond max limit",
            videoStreamID);
      }
    }
  }
  // Update the counts in the SDK allocated stream attributes
  if (GetCameraAVStreamMgmtServer()->UpdateAudioStreamRefCount(
          audioStreamID, /* shouldIncrement = */ true) != CHIP_NO_ERROR) {
    ChipLogError(Camera, "Failed to increment audio stream %u ref count in SDK",
                 audioStreamID);
  }
  if (GetCameraAVStreamMgmtServer()->UpdateVideoStreamRefCount(
          videoStreamID, /* shouldIncrement = */ true) != CHIP_NO_ERROR) {
    ChipLogError(Camera, "Failed to increment video stream %u ref count in SDK",
                 videoStreamID);
  }
  return CHIP_NO_ERROR;
}
// Counterpart of OnTransportAcquireAudioVideoStreams: decrements the
// per-stream reference counts in both the HAL records and the SDK's
// allocated-stream attributes when a transport releases its stream pair.
// Always returns CHIP_NO_ERROR; failures are only logged.
CHIP_ERROR
CameraAVStreamManager::OnTransportReleaseAudioVideoStreams(
    uint16_t audioStreamID, uint16_t videoStreamID) {
  // Update the available audio stream in the HAL
  for (AudioStream &stream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableAudioStreams()) {
    if (stream.audioStreamParams.audioStreamID == audioStreamID &&
        stream.isAllocated) {
      // Guard against underflowing the unsigned counter.
      if (stream.audioStreamParams.referenceCount > 0) {
        stream.audioStreamParams.referenceCount--;
      } else {
        ChipLogError(Camera,
                     "Attempted to decrement audio stream %u ref count when it "
                     "was already 0",
                     audioStreamID);
      }
    }
  }
  // Update the available video stream in the HAL
  for (VideoStream &stream :
       mCameraDeviceHAL->GetCameraHALInterface().GetAvailableVideoStreams()) {
    if (stream.videoStreamParams.videoStreamID == videoStreamID &&
        stream.isAllocated) {
      if (stream.videoStreamParams.referenceCount > 0) {
        stream.videoStreamParams.referenceCount--;
      } else {
        ChipLogError(Camera,
                     "Attempted to decrement video stream %u ref count when it "
                     "was already 0",
                     videoStreamID);
      }
    }
  }
  // Update the counts in the SDK allocated stream attributes
  if (GetCameraAVStreamMgmtServer()->UpdateAudioStreamRefCount(
          audioStreamID, /* shouldIncrement = */ false) != CHIP_NO_ERROR) {
    ChipLogError(Camera, "Failed to decrement audio stream %u ref count in SDK",
                 audioStreamID);
  }
  if (GetCameraAVStreamMgmtServer()->UpdateVideoStreamRefCount(
          videoStreamID, /* shouldIncrement = */ false) != CHIP_NO_ERROR) {
    ChipLogError(Camera, "Failed to decrement video stream %u ref count in SDK",
                 videoStreamID);
  }
  return CHIP_NO_ERROR;
}
@@ -0,0 +1,128 @@
#pragma once
#include "camera-avstream-controller.h"
#include "camera-device-interface.h"
#include <app/clusters/camera-av-stream-management-server/camera-av-stream-management-server.h>
#include <app/util/config.h>
#include <vector>
namespace chip {
namespace app {
namespace Clusters {
namespace CameraAvStreamManagement {
/**
* The application delegate to define the options & implement commands.
*/
class CameraAVStreamManager : public CameraAVStreamMgmtDelegate,
                              public CameraAVStreamController {
public:
  // --- Stream lifecycle (CameraAVStreamMgmtDelegate overrides) ---
  Protocols::InteractionModel::Status
  VideoStreamAllocate(const VideoStreamStruct &allocateArgs,
                      uint16_t &outStreamID) override;
  Protocols::InteractionModel::Status
  VideoStreamModify(const uint16_t streamID,
                    const chip::Optional<bool> waterMarkEnabled,
                    const chip::Optional<bool> osdEnabled) override;
  Protocols::InteractionModel::Status
  VideoStreamDeallocate(const uint16_t streamID) override;
  Protocols::InteractionModel::Status
  AudioStreamAllocate(const AudioStreamStruct &allocateArgs,
                      uint16_t &outStreamID) override;
  Protocols::InteractionModel::Status
  AudioStreamDeallocate(const uint16_t streamID) override;
  Protocols::InteractionModel::Status
  SnapshotStreamAllocate(const SnapshotStreamAllocateArgs &allocateArgs,
                         uint16_t &outStreamID) override;
  Protocols::InteractionModel::Status
  SnapshotStreamModify(const uint16_t streamID,
                       const chip::Optional<bool> waterMarkEnabled,
                       const chip::Optional<bool> osdEnabled) override;
  Protocols::InteractionModel::Status
  SnapshotStreamDeallocate(const uint16_t streamID) override;
  // --- Notifications from the SDK server ---
  void OnVideoStreamAllocated(const VideoStreamStruct &allocatedStream,
                              StreamAllocationAction action) override;
  void OnStreamUsagePrioritiesChanged() override;
  void OnAttributeChanged(AttributeId attributeId) override;
  Protocols::InteractionModel::Status
  CaptureSnapshot(const chip::app::DataModel::Nullable<uint16_t> streamID,
                  const VideoResolutionStruct &resolution,
                  ImageSnapshot &outImageSnapshot) override;
  // --- Validation / state queries (CameraAVStreamController overrides) ---
  CHIP_ERROR
  ValidateStreamUsage(
      StreamUsageEnum streamUsage,
      Optional<DataModel::Nullable<uint16_t>> &videoStreamId,
      Optional<DataModel::Nullable<uint16_t>> &audioStreamId) override;
  CHIP_ERROR
  ValidateVideoStreamID(uint16_t videoStreamId) override;
  CHIP_ERROR
  ValidateAudioStreamID(uint16_t audioStreamId) override;
  CHIP_ERROR IsHardPrivacyModeActive(bool &isActive) override;
  CHIP_ERROR IsSoftRecordingPrivacyModeActive(bool &isActive) override;
  CHIP_ERROR IsSoftLivestreamPrivacyModeActive(bool &isActive) override;
  bool HasAllocatedVideoStreams() override;
  bool HasAllocatedAudioStreams() override;
  CHIP_ERROR SetHardPrivacyModeOn(bool hardPrivacyMode) override;
  // Called after persisted attributes are loaded; rehydrates HAL stream state.
  CHIP_ERROR PersistentAttributesLoadedCallback() override;
  // --- Transport reference counting ---
  CHIP_ERROR
  OnTransportAcquireAudioVideoStreams(uint16_t audioStreamID,
                                      uint16_t videoStreamID) override;
  CHIP_ERROR
  OnTransportReleaseAudioVideoStreams(uint16_t audioStreamID,
                                      uint16_t videoStreamID) override;
  const std::vector<
      chip::app::Clusters::CameraAvStreamManagement::VideoStreamStruct> &
  GetAllocatedVideoStreams() const override;
  const std::vector<
      chip::app::Clusters::CameraAvStreamManagement::AudioStreamStruct> &
  GetAllocatedAudioStreams() const override;
  void GetBandwidthForStreams(
      const Optional<DataModel::Nullable<uint16_t>> &videoStreamId,
      const Optional<DataModel::Nullable<uint16_t>> &audioStreamId,
      uint32_t &outBandwidthbps) override;
  CameraAVStreamManager() = default;
  ~CameraAVStreamManager() = default;
  // Injects the camera HAL; must be called before the delegate is used.
  void SetCameraDeviceHAL(CameraDeviceInterface *aCameraDevice);
private:
  // Helpers for PersistentAttributesLoadedCallback.
  CHIP_ERROR AllocatedVideoStreamsLoaded();
  CHIP_ERROR AllocatedAudioStreamsLoaded();
  CHIP_ERROR AllocatedSnapshotStreamsLoaded();
  // Non-owning handle to the camera device HAL.
  CameraDeviceInterface *mCameraDeviceHAL = nullptr;
};
} // namespace CameraAvStreamManagement
} // namespace Clusters
} // namespace app
} // namespace chip
File diff suppressed because it is too large Load Diff
@@ -0,0 +1,167 @@
#pragma once
#include "camera-device-interface.h"
#include <app-common/zap-generated/cluster-enums.h>
#include <app/CASESessionManager.h>
#include <app/clusters/webrtc-transport-provider-server/webrtc-transport-provider-server.h>
#include <map>
#include <webrtc-transport.h>
namespace chip {
namespace app {
namespace Clusters {
namespace WebRTCTransportProvider {
using ICEServerDecodableStruct =
chip::app::Clusters::Globals::Structs::ICEServerStruct::DecodableType;
using WebRTCSessionStruct =
chip::app::Clusters::Globals::Structs::WebRTCSessionStruct::Type;
using ICECandidateStruct =
chip::app::Clusters::Globals::Structs::ICECandidateStruct::Type;
using StreamUsageEnum = chip::app::Clusters::Globals::StreamUsageEnum;
using WebRTCEndReasonEnum = chip::app::Clusters::Globals::WebRTCEndReasonEnum;
// Delegate for the WebRTC Transport Provider cluster: manages WebRTC session
// setup/teardown (offer/answer/ICE exchange) and maps Matter sessions to
// WebrtcTransport instances.
class WebRTCProviderManager : public Delegate,
                              public WebRTCTransportProviderController {
public:
  WebRTCProviderManager()
      : mOnConnectedCallback(OnDeviceConnected, this),
        mOnConnectionFailureCallback(OnDeviceConnectionFailure, this) {}
  ~WebRTCProviderManager() { CloseConnection(); };
  void Init();
  void CloseConnection();
  void SetWebRTCTransportProvider(std::unique_ptr<WebRTCTransportProviderServer>
                                      webRTCTransportProvider) override;
  // --- Cluster command handlers (Delegate overrides) ---
  CHIP_ERROR HandleSolicitOffer(
      const OfferRequestArgs &args,
      chip::app::Clusters::WebRTCTransportProvider::WebRTCSessionStruct
          &outSession,
      bool &outDeferredOffer) override;
  CHIP_ERROR
  HandleProvideOffer(
      const ProvideOfferRequestArgs &args,
      chip::app::Clusters::WebRTCTransportProvider::WebRTCSessionStruct
          &outSession) override;
  CHIP_ERROR HandleProvideAnswer(uint16_t sessionId,
                                 const std::string &sdpAnswer) override;
  CHIP_ERROR HandleProvideICECandidates(
      uint16_t sessionId,
      const std::vector<ICECandidateStruct> &candidates) override;
  CHIP_ERROR HandleEndSession(
      uint16_t sessionId,
      chip::app::Clusters::WebRTCTransportProvider::WebRTCEndReasonEnum
          reasonCode,
      chip::app::DataModel::Nullable<uint16_t> videoStreamID,
      chip::app::DataModel::Nullable<uint16_t> audioStreamID) override;
  // --- Validation hooks ---
  CHIP_ERROR ValidateStreamUsage(
      StreamUsageEnum streamUsage,
      chip::Optional<chip::app::DataModel::Nullable<uint16_t>> &videoStreamId,
      chip::Optional<chip::app::DataModel::Nullable<uint16_t>> &audioStreamId)
      override;
  // Injects the camera device handle used to reach other clusters.
  void SetCameraDevice(CameraDeviceInterface *aCameraDevice);
  CHIP_ERROR ValidateVideoStreamID(uint16_t videoStreamId) override;
  CHIP_ERROR ValidateAudioStreamID(uint16_t audioStreamId) override;
  CHIP_ERROR IsStreamUsageSupported(StreamUsageEnum streamUsage) override;
  CHIP_ERROR IsHardPrivacyModeActive(bool &isActive) override;
  CHIP_ERROR IsSoftRecordingPrivacyModeActive(bool &isActive) override;
  CHIP_ERROR IsSoftLivestreamPrivacyModeActive(bool &isActive) override;
  bool HasAllocatedVideoStreams() override;
  bool HasAllocatedAudioStreams() override;
  CHIP_ERROR ValidateSFrameConfig(uint16_t cipherSuite, size_t baseKeyLength) override;
  CHIP_ERROR IsUTCTimeNull(bool & isNull) override;
  void LiveStreamPrivacyModeChanged(bool privacyModeEnabled);
  // Returns the transport for a session, presumably nullptr when absent —
  // confirm against the implementation.
  WebrtcTransport *GetTransport(uint16_t sessionId);
private:
  // --- Deferred sends scheduled onto the Matter event loop ---
  void ScheduleOfferSend(uint16_t sessionId);
  void ScheduleICECandidatesSend(uint16_t sessionId);
  void ScheduleAnswerSend(uint16_t sessionId);
  void ScheduleEndSend(uint16_t sessionId);
  void RegisterWebrtcTransport(uint16_t sessionId);
  void UnregisterWebrtcTransport(uint16_t sessionId);
  // --- Client-side command senders (run on an established CASE session) ---
  CHIP_ERROR SendOfferCommand(chip::Messaging::ExchangeManager &exchangeMgr,
                              const chip::SessionHandle &sessionHandle,
                              uint16_t sessionId);
  CHIP_ERROR SendAnswerCommand(chip::Messaging::ExchangeManager &exchangeMgr,
                               const chip::SessionHandle &sessionHandle,
                               uint16_t sessionId);
  CHIP_ERROR
  SendICECandidatesCommand(chip::Messaging::ExchangeManager &exchangeMgr,
                           const chip::SessionHandle &sessionHandle,
                           uint16_t sessionId);
  CHIP_ERROR SendEndCommand(chip::Messaging::ExchangeManager &exchangeMgr,
                            const chip::SessionHandle &sessionHandle,
                            uint16_t sessionId, WebRTCEndReasonEnum endReason);
  CHIP_ERROR AcquireAudioVideoStreams(uint16_t sessionId);
  CHIP_ERROR ReleaseAudioVideoStreams(uint16_t sessionId);
  static void OnDeviceConnected(void *context,
                                chip::Messaging::ExchangeManager &exchangeMgr,
                                const chip::SessionHandle &sessionHandle);
  static void OnDeviceConnectionFailure(void *context,
                                        const chip::ScopedNodeId &peerId,
                                        CHIP_ERROR error);
  // WebRTC Callbacks
  void OnLocalDescription(const std::string &sdp, SDPType type,
                          const uint16_t sessionId);
  void OnConnectionStateChanged(bool connected, const uint16_t sessionId);
  chip::Callback::Callback<chip::OnDeviceConnected> mOnConnectedCallback;
  chip::Callback::Callback<chip::OnDeviceConnectionFailure>
      mOnConnectionFailureCallback;
  // Session ID -> owned transport instance.
  std::unordered_map<uint16_t, std::unique_ptr<WebrtcTransport>>
      mWebrtcTransportMap;
  // This is to retrieve the sessionIds for a given NodeId
  std::map<ScopedNodeId, uint16_t> mSessionIdMap;
  std::unique_ptr<WebRTCTransportProviderServer> mWebRTCTransportProvider =
      nullptr;
  // Handle to the Camera Device interface. For accessing other
  // clusters, if required.
  CameraDeviceInterface *mCameraDevice = nullptr;
  bool mSoftLiveStreamPrivacyEnabled = false;
};
} // namespace WebRTCTransportProvider
} // namespace Clusters
} // namespace app
} // namespace chip
+243
View File
@@ -0,0 +1,243 @@
#include "camera-app.h"
using namespace chip;
using namespace chip::app;
using namespace chip::app::Clusters;
using namespace chip::app::Clusters::Chime;
using namespace chip::app::Clusters::WebRTCTransportProvider;
using namespace chip::app::Clusters::CameraAvStreamManagement;
static constexpr uint32_t kBitsPerMegabit = 1000000;
template <typename T> using List = chip::app::DataModel::List<T>;
using Status = Protocols::InteractionModel::Status;
// Constructs the camera application: instantiates the WebRTC Transport
// Provider server, derives the CameraAVStreamMgmt feature map and optional
// attribute set from the HAL's capability queries, gathers the server's
// initialization parameters from the HAL, and instantiates the
// CameraAVStreamMgmt server on the given endpoint.
// @param aClustersEndpoint Endpoint hosting the camera clusters.
// @param aCameraDevice     Non-owning handle to the camera device HAL.
CameraApp::CameraApp(chip::EndpointId aClustersEndpoint,
                     CameraDeviceInterface *aCameraDevice) {
  mEndpoint = aClustersEndpoint;
  mCameraDevice = aCameraDevice;
  // Instantiate WebRTCTransport Provider
  mWebRTCTransportProviderPtr = std::make_unique<WebRTCTransportProviderServer>(
      mCameraDevice->GetWebRTCProviderDelegate(), mEndpoint);
  // Fetch all initialization parameters for CameraAVStreamMgmt Server
  BitFlags<CameraAvStreamManagement::Feature> avsmFeatures;
  BitFlags<CameraAvStreamManagement::OptionalAttribute> avsmOptionalAttrs;
  // Snapshot and Video are always enabled for this example.
  avsmFeatures.Set(CameraAvStreamManagement::Feature::kSnapshot);
  avsmFeatures.Set(CameraAvStreamManagement::Feature::kVideo);
  // Enable the Watermark and OSD features if camera supports
  if (mCameraDevice->GetCameraHALInterface().GetCameraSupportsWatermark()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kWatermark);
  }
  if (mCameraDevice->GetCameraHALInterface().GetCameraSupportsOSD()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kOnScreenDisplay);
  }
  if (mCameraDevice->GetCameraHALInterface().GetCameraSupportsSoftPrivacy()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kPrivacy);
  }
  // Check microphone support to set Audio feature
  if (mCameraDevice->GetCameraHALInterface().HasMicrophone()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kAudio);
    avsmOptionalAttrs.Set(
        CameraAvStreamManagement::OptionalAttribute::kMicrophoneAGCEnabled);
  }
  if (mCameraDevice->GetCameraHALInterface().HasLocalStorage()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kLocalStorage);
  }
  // Check if camera has speaker
  if (mCameraDevice->GetCameraHALInterface().HasSpeaker()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kSpeaker);
  }
  if (mCameraDevice->GetCameraHALInterface().GetCameraSupportsHDR()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kHighDynamicRange);
  }
  if (mCameraDevice->GetCameraHALInterface().GetCameraSupportsNightVision()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kNightVision);
    avsmOptionalAttrs.Set(
        CameraAvStreamManagement::OptionalAttribute::kNightVisionIllum);
  }
  if (mCameraDevice->GetCameraHALInterface().HasHardPrivacySwitch()) {
    avsmOptionalAttrs.Set(
        CameraAvStreamManagement::OptionalAttribute::kHardPrivacyModeOn);
  }
  if (mCameraDevice->GetCameraHALInterface().HasStatusLight()) {
    avsmOptionalAttrs.Set(
        CameraAvStreamManagement::OptionalAttribute::kStatusLightEnabled);
    avsmOptionalAttrs.Set(
        CameraAvStreamManagement::OptionalAttribute::kStatusLightBrightness);
  }
  if (mCameraDevice->GetCameraHALInterface().GetCameraSupportsImageControl()) {
    avsmFeatures.Set(CameraAvStreamManagement::Feature::kImageControl);
    avsmOptionalAttrs.Set(
        CameraAvStreamManagement::OptionalAttribute::kImageFlipVertical);
    avsmOptionalAttrs.Set(
        CameraAvStreamManagement::OptionalAttribute::kImageFlipHorizontal);
    avsmOptionalAttrs.Set(
        CameraAvStreamManagement::OptionalAttribute::kImageRotation);
  }
  // Gather the remaining server construction parameters from the HAL.
  uint32_t maxConcurrentVideoEncoders =
      mCameraDevice->GetCameraHALInterface().GetMaxConcurrentEncoders();
  uint32_t maxEncodedPixelRate =
      mCameraDevice->GetCameraHALInterface().GetMaxEncodedPixelRate();
  VideoSensorParamsStruct sensorParams =
      mCameraDevice->GetCameraHALInterface().GetVideoSensorParams();
  bool nightVisionUsesInfrared =
      mCameraDevice->GetCameraHALInterface().GetNightVisionUsesInfrared();
  VideoResolutionStruct minViewport =
      mCameraDevice->GetCameraHALInterface().GetMinViewport();
  std::vector<RateDistortionTradeOffStruct> rateDistortionTradeOffPoints =
      mCameraDevice->GetCameraHALInterface().GetRateDistortionTradeOffPoints();
  uint32_t maxContentBufferSize =
      mCameraDevice->GetCameraHALInterface().GetMaxContentBufferSize();
  AudioCapabilitiesStruct micCapabilities =
      mCameraDevice->GetCameraHALInterface().GetMicrophoneCapabilities();
  AudioCapabilitiesStruct spkrCapabilities =
      mCameraDevice->GetCameraHALInterface().GetSpeakerCapabilities();
  // Full-duplex two-way talk only when both a microphone and speaker exist.
  TwoWayTalkSupportTypeEnum twowayTalkSupport =
      mCameraDevice->GetCameraHALInterface().HasMicrophone() &&
              mCameraDevice->GetCameraHALInterface().HasSpeaker()
          ? TwoWayTalkSupportTypeEnum::kFullDuplex
          : TwoWayTalkSupportTypeEnum::kNotSupported;
  std::vector<SnapshotCapabilitiesStruct> snapshotCapabilities =
      mCameraDevice->GetCameraHALInterface().GetSnapshotCapabilities();
  // HAL reports Mbps; the cluster attribute expects bps.
  uint32_t maxNetworkBandwidth =
      mCameraDevice->GetCameraHALInterface().GetMaxNetworkBandwidth() *
      kBitsPerMegabit;
  std::vector<StreamUsageEnum> supportedStreamUsages =
      mCameraDevice->GetCameraHALInterface().GetSupportedStreamUsages();
  std::vector<StreamUsageEnum> streamUsagePriorities =
      mCameraDevice->GetCameraHALInterface().GetStreamUsagePriorities();
  // Instantiate the CameraAVStreamMgmt Server
  mAVStreamMgmtServerPtr = std::make_unique<CameraAVStreamMgmtServer>(
      mCameraDevice->GetCameraAVStreamMgmtDelegate(), mEndpoint, avsmFeatures,
      avsmOptionalAttrs, maxConcurrentVideoEncoders, maxEncodedPixelRate,
      sensorParams, nightVisionUsesInfrared, minViewport,
      rateDistortionTradeOffPoints, maxContentBufferSize, micCapabilities,
      spkrCapabilities, twowayTalkSupport, snapshotCapabilities,
      maxNetworkBandwidth, supportedStreamUsages, streamUsagePriorities);
}
void CameraApp::InitializeCameraAVStreamMgmt() {
  // Seed the AV stream management server's attributes from the camera HAL,
  // checking each optional capability flag before touching its attributes.
  auto &hal = mCameraDevice->GetCameraHALInterface();

  if (hal.GetCameraSupportsHDR()) {
    mAVStreamMgmtServerPtr->SetHDRModeEnabled(hal.GetHDRMode());
  }

  if (hal.GetCameraSupportsSoftPrivacy()) {
    mAVStreamMgmtServerPtr->SetSoftRecordingPrivacyModeEnabled(
        hal.GetSoftRecordingPrivacyModeEnabled());
    mAVStreamMgmtServerPtr->SetSoftLivestreamPrivacyModeEnabled(
        hal.GetSoftLivestreamPrivacyModeEnabled());
  }

  if (hal.HasHardPrivacySwitch()) {
    mAVStreamMgmtServerPtr->SetHardPrivacyModeOn(hal.GetHardPrivacyMode());
  }

  if (hal.GetCameraSupportsNightVision()) {
    mAVStreamMgmtServerPtr->SetNightVision(hal.GetNightVision());
  }

  // The viewport is unconditional: every camera reports one.
  mAVStreamMgmtServerPtr->SetViewport(hal.GetViewport());

  if (hal.HasSpeaker()) {
    mAVStreamMgmtServerPtr->SetSpeakerMuted(hal.GetSpeakerMuted());
    mAVStreamMgmtServerPtr->SetSpeakerVolumeLevel(hal.GetSpeakerVolume());
    mAVStreamMgmtServerPtr->SetSpeakerMaxLevel(hal.GetSpeakerMaxLevel());
    mAVStreamMgmtServerPtr->SetSpeakerMinLevel(hal.GetSpeakerMinLevel());
  }

  if (hal.HasMicrophone()) {
    mAVStreamMgmtServerPtr->SetMicrophoneMuted(hal.GetMicrophoneMuted());
    mAVStreamMgmtServerPtr->SetMicrophoneVolumeLevel(hal.GetMicrophoneVolume());
    mAVStreamMgmtServerPtr->SetMicrophoneMaxLevel(hal.GetMicrophoneMaxLevel());
    mAVStreamMgmtServerPtr->SetMicrophoneMinLevel(hal.GetMicrophoneMinLevel());
  }

  // Video and Snapshot features are already enabled.
  if (hal.HasLocalStorage()) {
    mAVStreamMgmtServerPtr->SetLocalVideoRecordingEnabled(
        hal.GetLocalVideoRecordingEnabled());
    mAVStreamMgmtServerPtr->SetLocalSnapshotRecordingEnabled(
        hal.GetLocalSnapshotRecordingEnabled());
  }

  if (hal.HasStatusLight()) {
    mAVStreamMgmtServerPtr->SetStatusLightEnabled(hal.GetStatusLightEnabled());
  }

  if (hal.GetCameraSupportsImageControl()) {
    mAVStreamMgmtServerPtr->SetImageRotation(hal.GetImageRotation());
    mAVStreamMgmtServerPtr->SetImageFlipVertical(hal.GetImageFlipVertical());
    mAVStreamMgmtServerPtr->SetImageFlipHorizontal(
        hal.GetImageFlipHorizontal());
  }

  mAVStreamMgmtServerPtr->Init();
}
void CameraApp::InitCameraDeviceClusters() {
  // Bring up the WebRTC Transport Provider server first, then hand its
  // ownership over to the device's WebRTC provider controller.
  mWebRTCTransportProviderPtr->Init();
  auto &webrtcController = mCameraDevice->GetWebRTCProviderController();
  webrtcController.SetWebRTCTransportProvider(
      std::move(mWebRTCTransportProviderPtr));
  // Finally seed and initialize the AV stream management cluster.
  InitializeCameraAVStreamMgmt();
}
void CameraApp::ShutdownCameraDeviceClusters() {
  ChipLogDetail(Camera,
                "CameraAppShutdown: Shutting down Camera device clusters");
  // Bug fix: InitCameraDeviceClusters() std::move()s mWebRTCTransportProviderPtr
  // into the WebRTC provider controller, so by shutdown time this unique_ptr is
  // normally empty and the unconditional dereference was a null-pointer crash.
  // NOTE(review): the moved-out server is now owned by the controller -- confirm
  // the controller shuts it down, otherwise its Shutdown() is never called.
  if (mWebRTCTransportProviderPtr) {
    mWebRTCTransportProviderPtr->Shutdown();
  }
}
// Fixed endpoint on which all camera clusters are instantiated.
static constexpr EndpointId kCameraEndpointId = 1;
// Application-wide CameraApp instance: created by CameraAppInit() and
// destroyed by CameraAppShutdown().
std::unique_ptr<CameraApp> gCameraApp;
// Creates the global CameraApp for the given device and initializes all of
// its cluster servers. `cameraDevice` is borrowed, not owned.
void CameraAppInit(CameraDeviceInterface *cameraDevice) {
  gCameraApp = std::make_unique<CameraApp>(kCameraEndpointId, cameraDevice);
  // Idiom fix: operator-> on the unique_ptr directly; `.get()->` is redundant.
  gCameraApp->InitCameraDeviceClusters();
  ChipLogDetail(Camera, "CameraAppInit: Initialized Camera clusters");
}
// Shuts down the camera clusters and destroys the global CameraApp.
// Safe to call even if CameraAppInit() was never invoked.
void CameraAppShutdown() {
  ChipLogDetail(Camera, "CameraAppShutdown: Shutting down Camera app");
  // Robustness fix: the previous `gCameraApp.get()->...` dereferenced a null
  // pointer when shutdown ran before init (or twice).
  if (gCameraApp) {
    gCameraApp->ShutdownCameraDeviceClusters();
    gCameraApp = nullptr;
  }
}
+47
View File
@@ -0,0 +1,47 @@
#pragma once
#include <app-common/zap-generated/cluster-objects.h>
#include "camera-device-interface.h"
#include <app/util/config.h>
#include <cstring>
#include <protocols/interaction_model/StatusCode.h>
#include <utility>
// Camera App defines all the cluster servers needed for a particular device.
// It owns the SDK cluster server instances and wires them to the device's
// delegates/controllers.
class CameraApp {
public:
  // This class is responsible for initialising all the camera clusters and
  // managing the interactions between them.
  // `cameraDevice` is borrowed; NOTE(review): assumed to outlive this object
  // -- confirm with the caller of CameraAppInit().
  CameraApp(chip::EndpointId aClustersEndpoint,
            CameraDeviceInterface *cameraDevice);
  // Initialize all the camera device clusters.
  void InitCameraDeviceClusters();
  // Shutdown all the camera device clusters
  void ShutdownCameraDeviceClusters();

private:
  // Endpoint on which the cluster servers are registered.
  chip::EndpointId mEndpoint;
  // Backing device implementation; not owned.
  CameraDeviceInterface *mCameraDevice;
  // SDK cluster servers
  // WebRTC Transport Provider server. Ownership is transferred to the
  // device's WebRTC provider controller in InitCameraDeviceClusters(), so
  // this pointer is empty afterwards.
  std::unique_ptr<chip::app::Clusters::WebRTCTransportProvider::
                      WebRTCTransportProviderServer>
      mWebRTCTransportProviderPtr;
  // std::unique_ptr<chip::app::Clusters::ChimeServer> mChimeServerPtr;
  // CameraAVStreamManagement server; retained for the object's lifetime.
  std::unique_ptr<
      chip::app::Clusters::CameraAvStreamManagement::CameraAVStreamMgmtServer>
      mAVStreamMgmtServerPtr;
  // std::unique_ptr<chip::app::Clusters::CameraAvSettingsUserLevelManagement::CameraAvSettingsUserLevelMgmtServer>
  // mAVSettingsUserLevelMgmtServerPtr;
  // Helper to set attribute defaults for CameraAVStreamMgmt
  void InitializeCameraAVStreamMgmt();
};
// Creates the global CameraApp for `cameraDevice` and initializes clusters.
void CameraAppInit(CameraDeviceInterface *cameraDevice);
// Shuts down the camera clusters and destroys the global CameraApp.
void CameraAppShutdown();
@@ -0,0 +1,58 @@
#pragma once
#include <app/clusters/camera-av-stream-management-server/camera-av-stream-management-server.h>
namespace chip {
namespace app {
namespace Clusters {
namespace CameraAvStreamManagement {
/**
 * The application interface to define the options & implement commands.
 */
class CameraAVStreamController {
public:
  virtual ~CameraAVStreamController() = default;
  // Validates a requested stream usage against the given (optional, nullable)
  // video/audio stream ids. Returns CHIP_NO_ERROR when acceptable.
  virtual CHIP_ERROR ValidateStreamUsage(
      StreamUsageEnum streamUsage,
      Optional<DataModel::Nullable<uint16_t>> &videoStreamId,
      Optional<DataModel::Nullable<uint16_t>> &audioStreamId) = 0;
  // Validate that the given stream id refers to an allocated stream.
  virtual CHIP_ERROR ValidateVideoStreamID(uint16_t videoStreamId) = 0;
  virtual CHIP_ERROR ValidateAudioStreamID(uint16_t audioStreamId) = 0;
  // Privacy-mode queries; the result is returned through `isActive`.
  virtual CHIP_ERROR IsHardPrivacyModeActive(bool &isActive) = 0;
  virtual CHIP_ERROR IsSoftRecordingPrivacyModeActive(bool &isActive) = 0;
  virtual CHIP_ERROR IsSoftLivestreamPrivacyModeActive(bool &isActive) = 0;
  // Turns the hard privacy mode on or off.
  virtual CHIP_ERROR SetHardPrivacyModeOn(bool hardPrivacyMode) = 0;
  // True when at least one video/audio stream is currently allocated.
  virtual bool HasAllocatedVideoStreams() = 0;
  virtual bool HasAllocatedAudioStreams() = 0;
  /**
   * @brief Calculates the total bandwidth in bps for the given video and audio
   * stream IDs. It iterates through the allocated video and audio streams, and
   * if a matching stream ID is found, its bit rate (maxBitRate for video,
   * bitRate for audio) is added to the total bandwidth.
   *
   * @param videoStreamId Optional nullable ID of the video stream.
   * @param audioStreamId Optional nullable ID of the audio stream.
   * @param outBandwidthbps Output parameter for the calculated total bandwidth
   * in bps.
   */
  virtual void GetBandwidthForStreams(
      const Optional<DataModel::Nullable<uint16_t>> &videoStreamId,
      const Optional<DataModel::Nullable<uint16_t>> &audioStreamId,
      uint32_t &outBandwidthbps) = 0;
};
} // namespace CameraAvStreamManagement
} // namespace Clusters
} // namespace app
} // namespace chip
@@ -0,0 +1,332 @@
#pragma once
#include "camera-avstream-controller.h"
// #include <media-controller.h>
#include "webrtc-provider-controller.h"
// #include
// <app/clusters/camera-av-settings-user-level-management-server/camera-av-settings-user-level-management-server.h>
#include <app/clusters/camera-av-stream-management-server/camera-av-stream-management-server.h>
// #include <app/clusters/chime-server/chime-server.h>
#include <app/clusters/webrtc-transport-provider-server/webrtc-transport-provider-server.h>
using chip::app::Clusters::CameraAvStreamManagement::AudioCapabilitiesStruct;
using chip::app::Clusters::CameraAvStreamManagement::AudioStreamStruct;
using chip::app::Clusters::CameraAvStreamManagement::ImageSnapshot;
using chip::app::Clusters::CameraAvStreamManagement::
RateDistortionTradeOffStruct;
using chip::app::Clusters::CameraAvStreamManagement::SnapshotCapabilitiesStruct;
using chip::app::Clusters::CameraAvStreamManagement::SnapshotStreamStruct;
using chip::app::Clusters::CameraAvStreamManagement::TriStateAutoEnum;
using chip::app::Clusters::CameraAvStreamManagement::VideoResolutionStruct;
using chip::app::Clusters::CameraAvStreamManagement::VideoSensorParamsStruct;
using chip::app::Clusters::CameraAvStreamManagement::VideoStreamStruct;
using chip::app::Clusters::Globals::StreamUsageEnum;
struct VideoStream {
VideoStreamStruct videoStreamParams;
bool isAllocated; // Flag to indicate if the stream is allocated.
chip::app::Clusters::Globals::Structs::ViewportStruct::Type
viewport; // Stream specific viewport, defaults to the camera viewport
void *videoContext; // Platform-specific context object associated with
// video stream;
bool IsCompatible(const VideoStreamStruct &inputParams) const {
return (videoStreamParams.videoCodec == inputParams.videoCodec &&
videoStreamParams.minFrameRate <= inputParams.minFrameRate &&
videoStreamParams.maxFrameRate >= inputParams.maxFrameRate &&
videoStreamParams.minResolution.width <=
inputParams.minResolution.width &&
videoStreamParams.minResolution.height <=
inputParams.minResolution.height &&
videoStreamParams.maxResolution.width >=
inputParams.maxResolution.width &&
videoStreamParams.maxResolution.height >=
inputParams.maxResolution.height &&
videoStreamParams.minBitRate <= inputParams.minBitRate &&
videoStreamParams.maxBitRate >= inputParams.maxBitRate &&
videoStreamParams.keyFrameInterval == inputParams.keyFrameInterval);
}
};
struct AudioStream {
AudioStreamStruct audioStreamParams;
bool isAllocated; // Flag to indicate if the stream is allocated.
void *audioContext; // Platform-specific context object associated with
// video stream;
bool IsCompatible(const AudioStreamStruct &inputParams) const {
return (audioStreamParams.audioCodec == inputParams.audioCodec &&
audioStreamParams.channelCount == inputParams.channelCount &&
audioStreamParams.sampleRate == inputParams.sampleRate &&
audioStreamParams.bitDepth == inputParams.bitDepth);
}
};
struct SnapshotStream {
  SnapshotStreamStruct snapshotStreamParams;
  bool isAllocated; // Flag to indicate if the stream is allocated
  void *snapshotContext; // Platform-specific context object associated with
                         // snapshot stream;
  // True when this stream can serve the requested allocation: identical codec
  // and quality, frame rate not above the requested maximum, and a resolution
  // range that covers the requested range.
  bool IsCompatible(const chip::app::Clusters::CameraAvStreamManagement::
                        CameraAVStreamMgmtDelegate::SnapshotStreamAllocateArgs
                            &inputParams) const {
    const SnapshotStreamStruct &own = snapshotStreamParams;
    if (own.imageCodec != inputParams.imageCodec ||
        own.quality != inputParams.quality) {
      return false;
    }
    if (own.frameRate > inputParams.maxFrameRate) {
      return false;
    }
    if (own.minResolution.width > inputParams.minResolution.width ||
        own.minResolution.height > inputParams.minResolution.height) {
      return false;
    }
    return own.maxResolution.width >= inputParams.maxResolution.width &&
           own.maxResolution.height >= inputParams.maxResolution.height;
  }
};
// Enumeration for common camera errors returned by the CameraHALInterface
// operations below.
enum class CameraError {
  SUCCESS,
  ERROR_INIT_FAILED,
  ERROR_VIDEO_STREAM_START_FAILED,
  ERROR_VIDEO_STREAM_STOP_FAILED,
  ERROR_AUDIO_STREAM_START_FAILED,
  ERROR_AUDIO_STREAM_STOP_FAILED,
  ERROR_SNAPSHOT_STREAM_START_FAILED,
  ERROR_SNAPSHOT_STREAM_STOP_FAILED,
  ERROR_CAPTURE_SNAPSHOT_FAILED,
  ERROR_CONFIG_FAILED,
  ERROR_RESOURCE_EXHAUSTED,
  ERROR_NOT_IMPLEMENTED, // For features not supported on a platform
};
// Camera Device Interface defines all the clusters that need to be implemented
// for a Camera Device
class CameraDeviceInterface {
public:
  virtual ~CameraDeviceInterface() = default;
  // Getter for WebRTCProvider Delegate
  virtual chip::app::Clusters::WebRTCTransportProvider::Delegate &
  GetWebRTCProviderDelegate() = 0;
  // Getter for WebRTCProvider Controller
  virtual chip::app::Clusters::WebRTCTransportProvider::
      WebRTCTransportProviderController &
      GetWebRTCProviderController() = 0;
  // Getter for CameraAVStreamManagement Delegate
  virtual chip::app::Clusters::CameraAvStreamManagement::
      CameraAVStreamMgmtDelegate &
      GetCameraAVStreamMgmtDelegate() = 0;
  // Getter for CameraAVStreamManagement Controller
  virtual chip::app::Clusters::CameraAvStreamManagement::
      CameraAVStreamController &
      GetCameraAVStreamMgmtController() = 0;
  // Class defining the Camera HAL interface. Pure-virtual: platforms provide
  // the concrete implementation.
  class CameraHALInterface {
  public:
    // Virtual destructor
    virtual ~CameraHALInterface() = default;
    // Initialize the camera hardware
    virtual CameraError InitializeCameraDevice() = 0;
    virtual CameraError InitializeStreams() = 0;
    virtual std::vector<VideoStream> &GetAvailableVideoStreams() = 0;
    virtual std::vector<AudioStream> &GetAvailableAudioStreams() = 0;
    virtual std::vector<SnapshotStream> &GetAvailableSnapshotStreams() = 0;
    // Capture a snapshot image
    virtual CameraError
    CaptureSnapshot(const chip::app::DataModel::Nullable<uint16_t> streamID,
                    const VideoResolutionStruct &resolution,
                    ImageSnapshot &outImageSnapshot) = 0;
    // Allocate snapshot stream
    virtual CameraError AllocateSnapshotStream(
        const chip::app::Clusters::CameraAvStreamManagement::
            CameraAVStreamMgmtDelegate::SnapshotStreamAllocateArgs &args,
        uint16_t &outStreamID) = 0;
    // Get the maximum number of concurrent encoders supported by camera.
    virtual uint8_t GetMaxConcurrentEncoders() = 0;
    // Get the maximum data rate in encoded pixels per second that the
    // camera can produce given the hardware encoders it has.
    virtual uint32_t GetMaxEncodedPixelRate() = 0;
    // Get the Video sensor params(sensor dimensions, framerate, HDR
    // capabilities)
    virtual VideoSensorParamsStruct &GetVideoSensorParams() = 0;
    // Get indication whether camera supports high dynamic range for video
    virtual bool GetCameraSupportsHDR() = 0;
    // Get indication whether camera supports night vision
    virtual bool GetCameraSupportsNightVision() = 0;
    // Get indication whether camera night vision using infrared
    virtual bool GetNightVisionUsesInfrared() = 0;
    // Get indication whether camera supports image control
    virtual bool GetCameraSupportsImageControl() = 0;
    // Get indication whether camera supports watermark for video and snapshot
    virtual bool GetCameraSupportsWatermark() = 0;
    // Get indication whether camera supports on-screen display for video and
    // snapshot
    virtual bool GetCameraSupportsOSD() = 0;
    // Get indication whether camera supports soft recording and livestream
    // privacy modes
    virtual bool GetCameraSupportsSoftPrivacy() = 0;
    // Get indication of the min resolution(pixels) that camera allows for
    // its viewport.
    virtual VideoResolutionStruct &GetMinViewport() = 0;
    // Get the rate distortion tradeoff points(min bitrate for resolutions) for
    // video codecs.
    virtual std::vector<RateDistortionTradeOffStruct> &
    GetRateDistortionTradeOffPoints() = 0;
    // Get the maximum size of content buffer in bytes. This buffer holds
    // compressed and/or raw audio/video content.
    virtual uint32_t GetMaxContentBufferSize() = 0;
    // Get microphone capabilities.
    virtual AudioCapabilitiesStruct &GetMicrophoneCapabilities() = 0;
    // Get speaker capabilities.
    virtual AudioCapabilitiesStruct &GetSpeakerCapabilities() = 0;
    // Get snapshot capabilities
    virtual std::vector<SnapshotCapabilitiesStruct> &
    GetSnapshotCapabilities() = 0;
    // Get the maximum network bandwidth(mbps) that the camera would consume
    // for transmission of its media streams.
    virtual uint32_t GetMaxNetworkBandwidth() = 0;
    // Get the current frame rate of the camera sensor.
    virtual uint16_t GetCurrentFrameRate() = 0;
    // Enable/Disable High Dynamic Range mode.
    virtual CameraError SetHDRMode(bool hdrMode) = 0;
    // Get the current camera HDR mode.
    virtual bool GetHDRMode() = 0;
    // Get Supported Stream usages; Typically set by manufacturer.
    // This also sets the default priority of the stream usages.
    virtual std::vector<StreamUsageEnum> &GetSupportedStreamUsages() = 0;
    // Get stream usage priorities as an ordered list. This is expected to
    // be a subset of the SupportedStreamUsages.
    virtual std::vector<StreamUsageEnum> &GetStreamUsagePriorities() = 0;
    virtual CameraError SetStreamUsagePriorities(
        std::vector<StreamUsageEnum> streamUsagePriorities) = 0;
    // Get/Set soft recording privacy mode
    virtual CameraError
    SetSoftRecordingPrivacyModeEnabled(bool softRecordingPrivacyMode) = 0;
    virtual bool GetSoftRecordingPrivacyModeEnabled() = 0;
    // Get/Set soft livestream privacy mode
    virtual CameraError
    SetSoftLivestreamPrivacyModeEnabled(bool softLivestreamPrivacyMode) = 0;
    virtual bool GetSoftLivestreamPrivacyModeEnabled() = 0;
    // Does camera have a hard privacy switch
    virtual bool HasHardPrivacySwitch() = 0;
    // Get/Set hard privacy mode
    virtual CameraError SetHardPrivacyMode(bool hardPrivacyMode) = 0;
    virtual bool GetHardPrivacyMode() = 0;
    // Get/Set night vision
    virtual CameraError SetNightVision(TriStateAutoEnum nightVision) = 0;
    virtual TriStateAutoEnum GetNightVision() = 0;
    // Set the viewport for all streams
    virtual CameraError SetViewport(
        const chip::app::Clusters::Globals::Structs::ViewportStruct::Type
            &viewPort) = 0;
    // Get the current camera viewport.
    virtual const chip::app::Clusters::Globals::Structs::ViewportStruct::Type &
    GetViewport() = 0;
    // Set the viewport for a specific stream
    virtual CameraError SetViewport(
        VideoStream &stream,
        const chip::app::Clusters::Globals::Structs::ViewportStruct::Type
            &viewPort) = 0;
    // Does camera have a speaker
    virtual bool HasSpeaker() = 0;
    // Mute/Unmute speaker.
    virtual bool GetSpeakerMuted() = 0;
    virtual CameraError SetSpeakerMuted(bool muteSpeaker) = 0;
    // Get/Set speaker volume level.
    virtual uint8_t GetSpeakerVolume() = 0;
    virtual CameraError SetSpeakerVolume(uint8_t speakerVol) = 0;
    // Get the speaker max and min levels.
    virtual uint8_t GetSpeakerMaxLevel() = 0;
    virtual uint8_t GetSpeakerMinLevel() = 0;
    // Does camera have a microphone
    virtual bool HasMicrophone() = 0;
    // Mute/Unmute microphone.
    virtual CameraError SetMicrophoneMuted(bool muteMicrophone) = 0;
    virtual bool GetMicrophoneMuted() = 0;
    // Set microphone volume level.
    virtual CameraError SetMicrophoneVolume(uint8_t microphoneVol) = 0;
    virtual uint8_t GetMicrophoneVolume() = 0;
    // Get the microphone max and min levels.
    virtual uint8_t GetMicrophoneMaxLevel() = 0;
    virtual uint8_t GetMicrophoneMinLevel() = 0;
    // Get/Set image control attributes
    virtual CameraError SetImageRotation(uint16_t imageRotation) = 0;
    virtual uint16_t GetImageRotation() = 0;
    virtual CameraError SetImageFlipHorizontal(bool imageFlipHorizontal) = 0;
    virtual bool GetImageFlipHorizontal() = 0;
    virtual CameraError SetImageFlipVertical(bool imageFlipVertical) = 0;
    virtual bool GetImageFlipVertical() = 0;
    // Does camera have local storage
    virtual bool HasLocalStorage() = 0;
    virtual CameraError
    SetLocalVideoRecordingEnabled(bool localVideoRecordingEnabled) = 0;
    virtual bool GetLocalVideoRecordingEnabled() = 0;
    virtual CameraError
    SetLocalSnapshotRecordingEnabled(bool localSnapshotRecordingEnabled) = 0;
    virtual bool GetLocalSnapshotRecordingEnabled() = 0;
    // Does camera have a status light
    virtual bool HasStatusLight() = 0;
    virtual CameraError SetStatusLightEnabled(bool statusLightEnabled) = 0;
    // Bug fix: this was declared non-pure (missing `= 0`) unlike every other
    // accessor, leaving a declared-but-undefined virtual that breaks linking
    // of any concrete implementation that does not override it.
    virtual bool GetStatusLightEnabled() = 0;
  };
  virtual CameraHALInterface &GetCameraHALInterface() = 0;
};
@@ -0,0 +1,25 @@
#pragma once
#include <app/clusters/webrtc-transport-provider-server/webrtc-transport-provider-server.h>
namespace chip {
namespace app {
namespace Clusters {
namespace WebRTCTransportProvider {
/**
 * The application interface to define the options & implement commands.
 */
class WebRTCTransportProviderController {
public:
  virtual ~WebRTCTransportProviderController() = default;
  // Takes ownership of the WebRTC Transport Provider server instance.
  // Called by CameraApp during cluster initialization.
  virtual void
  SetWebRTCTransportProvider(std::unique_ptr<WebRTCTransportProviderServer>
                                 webRTCTransportProvider) = 0;
};
} // namespace WebRTCTransportProvider
} // namespace Clusters
} // namespace app
} // namespace chip
+28
View File
@@ -0,0 +1,28 @@
dependencies:
espressif/cmake_utilities:
version: "^1"
    rules: # the dependency is added only when all "if" clauses below are true
- if: "idf_version >=5.0"
- if: "target in [esp32c2]"
network_coprocessor:
version: "*"
path: ${KVS_SDK_PATH}/esp_port/components/network_coprocessor
rules:
- if: "target in [esp32c6]"
esp_webrtc_utils:
path: ${KVS_SDK_PATH}/esp_port/components/esp_webrtc_utils
version: "*"
kvs_webrtc:
path: ${KVS_SDK_PATH}/esp_port/components/kvs_webrtc
version: "*"
signaling_serializer:
path: ${KVS_SDK_PATH}/esp_port/components/signaling_serializer
version: "*"
webrtc_bridge:
path: ${KVS_SDK_PATH}/esp_port/components/webrtc_bridge
version: "*"
@@ -0,0 +1,54 @@
#pragma once
#include <functional>
#include <memory>
#include <string>
#include <vector>
// Forward declarations
class WebRTCPeerConnection;
class WebRTCTrack;
// SDP description kinds exchanged during session negotiation.
enum class SDPType : uint8_t { Offer, Answer, Pranswer, Rollback };
// Kind of media carried by a track.
enum class MediaType : uint8_t {
  Audio,
  Video,
};
// Invoked when a local SDP description becomes available.
using OnLocalDescriptionCallback =
    std::function<void(const std::string &sdp, SDPType type)>;
// Invoked for each locally gathered ICE candidate.
using OnICECandidateCallback =
    std::function<void(const std::string &candidate)>;
// Invoked when the connection is established (true) or lost (false).
using OnConnectionStateCallback = std::function<void(bool connected)>;
// Invoked when a media track is added to the connection.
using OnTrackCallback = std::function<void(std::shared_ptr<WebRTCTrack> track)>;
// Abstract track interface: a single media (audio or video) stream endpoint.
class WebRTCTrack {
public:
  virtual ~WebRTCTrack() = default;
  // Sends one buffer of encoded media on this track.
  virtual void SendData(const char *data, size_t size) = 0;
  // True when the track is ready to accept data.
  virtual bool IsReady() = 0;
  virtual std::string GetType() = 0; // "video" or "audio"
};
// Abstract peer connection interface
class WebRTCPeerConnection {
public:
  virtual ~WebRTCPeerConnection() = default;
  // Registers the callbacks fired on local SDP availability, ICE candidate
  // gathering, connection state changes and incoming tracks.
  virtual void SetCallbacks(OnLocalDescriptionCallback onLocalDescription,
                            OnICECandidateCallback onICECandidate,
                            OnConnectionStateCallback onConnectionState,
                            OnTrackCallback onTrack) = 0;
  // Tears the connection down.
  virtual void Close() = 0;
  // Begins offer generation for the given session id.
  virtual void CreateOffer(uint16_t sessionId) = 0;
  // Begins answer generation for a previously received offer.
  virtual void CreateAnswer() = 0;
  // Applies the remote peer's SDP description.
  virtual void SetRemoteDescription(const std::string &sdp, SDPType type) = 0;
  // Adds a remote ICE candidate; `mid` identifies the media section.
  virtual void AddRemoteCandidate(const std::string &candidate,
                                  const std::string &mid) = 0;
  // Creates and attaches a media track of the given type.
  virtual std::shared_ptr<WebRTCTrack> AddTrack(MediaType mediaType) = 0;
};
// Factory returning the platform-specific peer connection implementation.
std::shared_ptr<WebRTCPeerConnection> CreateWebRTCPeerConnection();
@@ -0,0 +1,184 @@
/*
*
* Copyright (c) 2025 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "webrtc-kvs_esp_port.h"
#include "camera-device.h"
#include "webrtc-kvs_esp_port_utils.h"
#include "webrtc-transport.h"
#include <signaling_serializer.h>
#include <webrtc_bridge.h>
using namespace chip;
using namespace chip::app;
using namespace chip::app::Clusters;
using namespace chip::app::Clusters::WebRTCTransportProvider;
using namespace Camera;
// Size of the scratch buffer used to build SDP JSON payloads.
static size_t gSDPLength = CONFIG_MAX_LARGE_BUFFER_SIZE_BYTES;
// Size of the scratch buffer used to build ICE-candidate JSON payloads.
static size_t gCandidateLength = 1024;
// Shared buffer holding the peer client id copied into outgoing signaling
// messages. NOTE(review): file-scope and reused by several methods -- confirm
// signaling is single-threaded, otherwise concurrent peers can race on it.
static char peerClientId[SS_MAX_SIGNALING_CLIENT_ID_LEN + 1];
// Camera device singleton defined elsewhere in the application.
extern CameraDevice gCameraDevice;
KVSWebRTCPeerConnection::KVSWebRTCPeerConnection()
{
    // Wrap an ESP peer connection around a freshly generated monotonic id.
    mPeerConnection = std::make_shared<EspWebRTCPeerConnection>(generateMonotonicPeerConnectionId());
}
void KVSWebRTCPeerConnection::SetCallbacks(OnLocalDescriptionCallback onLocalDescription, OnICECandidateCallback onICECandidate,
                                           OnConnectionStateCallback onConnectionState, OnTrackCallback onTrack)
{
    // Intentionally empty on this port: callbacks are not stored here.
    // Received messages are forwarded using webrtc_bridge_send_message
}
void KVSWebRTCPeerConnection::Close()
{
    // Intentionally empty on this port.
    // KVSWebRTC close is handled by the KVSWebRTCManager.
}
void KVSWebRTCPeerConnection::CreateOffer(uint16_t sessionId)
{
    // Asks the KVS WebRTC manager, over the signaling bridge, to generate an
    // SDP offer for this peer connection.
    // Set local description in KVSWebRTC
    // Answer is received via webrtc_bridge_message_received_cb
    std::unique_ptr<signaling_msg_t> message(new (std::nothrow) signaling_msg_t());
    if (message == nullptr)
    {
        ChipLogError(Camera, "CreateOffer: failed to allocate signaling_msg_t");
        return;
    }
    message->version     = 0;
    message->messageType = SIGNALING_MSG_TYPE_TRIGGER_OFFER;
    // The Matter session id rides in correlationId so the resulting answer
    // can be matched back to the requesting session.
    snprintf(message->correlationId, sizeof(message->correlationId), "%u", sessionId);
    std::string peerConnectionId = this->GetPeerConnectionId();
    // NOTE(review): peerClientId is a file-scope buffer shared by several
    // methods -- confirm signaling runs single-threaded.
    snprintf(peerClientId, sizeof(peerClientId), "%s", peerConnectionId.c_str());
    memcpy(message->peerClientId, peerClientId, sizeof(peerClientId));
    // A trigger-offer message carries no payload.
    message->payloadLen = 0;
    message->payload    = NULL;
    size_t serialized_len = 0;
    char * serialized_msg = serialize_signaling_message(message.get(), &serialized_len);
    if (serialized_msg)
    {
        // NOTE(review): serialized_msg is not freed here -- confirm whether
        // webrtc_bridge_send_message takes ownership; otherwise this leaks.
        webrtc_bridge_send_message(serialized_msg, serialized_len);
    }
}
void KVSWebRTCPeerConnection::CreateAnswer()
{
    // Intentionally empty on this port.
    // Answer is received from KVSWebRTCManager when offer is sent and received
    // via webrtc_bridge.
}
void KVSWebRTCPeerConnection::SetRemoteDescription(const std::string & sdp, SDPType type)
{
// handles SDP Offer received from webrtc requestor.
// Send SDP to KVSWebRTCManager.
char * sdp_json = (char *) malloc(gSDPLength);
std::unique_ptr<signaling_msg_t> message(new (std::nothrow) signaling_msg_t());
if (message == nullptr)
{
ChipLogError(Camera, "SetRemoteDescription: failed to allocate signaling_msg_t");
return;
}
std::string escaped_sdp = json_escape(sdp);
size_t json_len = 0;
if (type == SDPType::Offer)
{
json_len = sprintf(sdp_json, "{\"type\": \"offer\", \"sdp\": \"%s\"}", escaped_sdp.c_str());
ChipLogProgress(Camera, "OFFER: \n%s\n", sdp_json);
message->messageType = SIGNALING_MSG_TYPE_OFFER;
}
else if (type == SDPType::Answer)
{
json_len = sprintf(sdp_json, "{\"type\": \"answer\", \"sdp\": \"%s\"}", escaped_sdp.c_str());
ChipLogProgress(Camera, "ANSWER: \n%s\n", sdp_json);
message->messageType = SIGNALING_MSG_TYPE_ANSWER;
}
message->version = 0;
std::string peerConnectionId = this->GetPeerConnectionId();
snprintf(peerClientId, sizeof(peerClientId), "%s", peerConnectionId.c_str());
memcpy(message->peerClientId, peerClientId, sizeof(peerClientId));
message->payloadLen = json_len;
message->payload = sdp_json;
size_t serialized_len = 0;
char * serialized_msg = serialize_signaling_message(message.get(), &serialized_len);
if (serialized_msg)
{
webrtc_bridge_send_message(serialized_msg, serialized_len);
}
ChipLogProgress(Camera, "SDP LENGTH: %d", serialized_len);
free(sdp_json);
}
void KVSWebRTCPeerConnection::AddRemoteCandidate(const std::string & candidate, const std::string & mid)
{
// Send webrtc requestor's candidates to KVSWebRTCManager.
char * candidate_json = (char *) malloc(gCandidateLength);
std::string escaped_sdp = json_escape(std::string(candidate.begin(), candidate.end()));
size_t json_len = sprintf(candidate_json, "{\"candidate\": \"%s\"}", escaped_sdp.c_str());
ChipLogProgress(Camera, "CANDIDATE: \n%s\n", candidate_json);
std::unique_ptr<signaling_msg_t> message(new (std::nothrow) signaling_msg_t());
if (message == nullptr)
{
ChipLogError(Camera, "AddRemoteCandidate: failed to allocate signaling_msg_t");
return;
}
message->version = 0;
message->messageType = SIGNALING_MSG_TYPE_ICE_CANDIDATE;
std::string peerConnectionId = this->GetPeerConnectionId();
snprintf(peerClientId, sizeof(peerClientId), "%s", peerConnectionId.c_str());
memcpy(message->peerClientId, peerClientId, sizeof(peerClientId));
message->payloadLen = json_len;
message->payload = candidate_json;
size_t serialized_len = 0;
char * serialized_msg = serialize_signaling_message(message.get(), &serialized_len);
if (serialized_msg)
{
webrtc_bridge_send_message(serialized_msg, serialized_len);
}
ChipLogProgress(Camera, "Candidate length: %d", serialized_len);
free(candidate_json);
}
std::shared_ptr<WebRTCTrack> KVSWebRTCPeerConnection::AddTrack(MediaType mediaType)
{
    // Addition of tracks is handled by the KVSWebRTCManager.
    // Always returns nullptr on this port; callers must tolerate a null track.
    return nullptr;
}
std::string KVSWebRTCPeerConnection::GetPeerConnectionId()
{
    // Delegate to the wrapped ESP peer-connection object.
    const std::string connectionId = mPeerConnection->GetPeerConnectionId();
    return connectionId;
}
std::shared_ptr<WebRTCPeerConnection> CreateWebRTCPeerConnection()
{
    // Factory: the KVS-backed implementation is the only one on this port.
    std::shared_ptr<WebRTCPeerConnection> connection = std::make_shared<KVSWebRTCPeerConnection>();
    return connection;
}
@@ -0,0 +1,52 @@
/*
*
* Copyright (c) 2025 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "webrtc-abstract.h"
#include "webrtc-provider-manager.h"
#include <lib/support/logging/CHIPLogging.h>
void webrtc_bridge_message_received_cb(void * data, int len);
// Thin value holder for the identifier that correlates signaling messages
// belonging to one peer connection.
class EspWebRTCPeerConnection
{
public:
    EspWebRTCPeerConnection(std::string peerConnectionId) : peerConnectionId(peerConnectionId) {}

    // Returns the identifier assigned at construction.
    std::string GetPeerConnectionId() { return peerConnectionId; }

private:
    std::string peerConnectionId;
};
// WebRTCPeerConnection implementation backed by the KVS WebRTC SDK running
// behind the signaling bridge; most operations are forwarded as serialized
// signaling messages rather than handled locally.
class KVSWebRTCPeerConnection : public WebRTCPeerConnection
{
public:
    KVSWebRTCPeerConnection();
    // No-op on this port; received messages arrive via the bridge callback.
    void SetCallbacks(OnLocalDescriptionCallback onLocalDescription, OnICECandidateCallback onICECandidate,
                      OnConnectionStateCallback onConnectionState, OnTrackCallback onTrack) override;
    // No-op: close is handled by the KVSWebRTCManager.
    void Close() override;
    // Sends a trigger-offer signaling message tagged with the session id.
    void CreateOffer(uint16_t sessionId) override;
    // No-op: the answer is produced by the KVSWebRTCManager.
    void CreateAnswer() override;
    // Forwards the remote SDP to the manager as a signaling message.
    void SetRemoteDescription(const std::string & sdp, SDPType type) override;
    // Forwards a remote ICE candidate to the manager.
    void AddRemoteCandidate(const std::string & candidate, const std::string & mid) override;
    // Always returns nullptr; tracks are managed by the KVSWebRTCManager.
    std::shared_ptr<WebRTCTrack> AddTrack(MediaType mediaType) override;
    // Identifier used to correlate signaling messages for this connection.
    std::string GetPeerConnectionId();

private:
    std::shared_ptr<EspWebRTCPeerConnection> mPeerConnection;
};
@@ -0,0 +1,372 @@
/*
*
* Copyright (c) 2025 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "camera-device.h"
#include "webrtc-provider-manager.h"
#include <iomanip>
#include <jsmn.h>
#include <signaling_serializer.h>
#include <sstream>
#include <string.h>
#include <webrtc-kvs_esp_port_utils.h>
using namespace chip;
using namespace chip::app;
using namespace chip::app::Clusters;
using namespace chip::app::Clusters::WebRTCTransportProvider;
using namespace Camera;
// Log tag for this translation unit. NOTE(review): appears unused in the
// visible portion of this file -- confirm before removing.
static const char * TAG = "webrtc-kvs_esp_port_utils";
// Size of the scratch buffer used when handling SDP JSON payloads.
static size_t gSDPLength = CONFIG_MAX_LARGE_BUFFER_SIZE_BYTES;
// Shared buffer for the peer client id in signaling messages.
static char peerClientId[SS_MAX_SIGNALING_CLIENT_ID_LEN + 1];
// Camera device singleton defined elsewhere in the application.
extern CameraDevice gCameraDevice;
// Monotonic counter used to derive unique peer-connection identifiers.
static std::atomic<unsigned int> peerConnectionCounter{ 0 }; // Starts from 0
// Reverses json_escape: converts two-character JSON escape sequences back into
// the characters they encode. \uXXXX is decoded for the ASCII range only (the
// code point is truncated to a single char). A malformed \u near the end of
// the input yields '?'; a lone trailing backslash is copied through unchanged.
static std::string json_unescape(const std::string & input)
{
    std::string result;
    result.reserve(input.length());
    size_t pos = 0;
    while (pos < input.length())
    {
        const char current = input[pos];
        // A backslash only starts an escape when another character follows it.
        if (current != '\\' || pos + 1 >= input.length())
        {
            result += current;
            ++pos;
            continue;
        }
        const char escape = input[pos + 1];
        if (escape == 'u')
        {
            if (pos + 5 < input.length())
            {
                std::istringstream hexStream(input.substr(pos + 2, 4));
                unsigned int code;
                if (hexStream >> std::hex >> code)
                {
                    result += static_cast<char>(code); // For ASCII-range only
                }
                pos += 6;
            }
            else
            {
                result += '?'; // malformed \u escape
                pos += 2;
            }
            continue;
        }
        char decoded;
        switch (escape)
        {
        case 'b':
            decoded = '\b';
            break;
        case 'f':
            decoded = '\f';
            break;
        case 'n':
            decoded = '\n';
            break;
        case 'r':
            decoded = '\r';
            break;
        case 't':
            decoded = '\t';
            break;
        default:
            // Covers \" and \\ as well as unknown escapes, all of which
            // simply yield the escaped character itself.
            decoded = escape;
            break;
        }
        result += decoded;
        pos += 2;
    }
    return result;
}
std::string json_escape(const std::string & input)
{
    // Escapes a raw string so it can be embedded inside a JSON string literal.
    // Control characters without a short escape form become \u00XX sequences.
    std::string result;
    result.reserve(input.size());
    for (const char ch : input)
    {
        const char * replacement = nullptr;
        switch (ch)
        {
        case '\"':
            replacement = "\\\"";
            break;
        case '\\':
            replacement = "\\\\";
            break;
        case '\b':
            replacement = "\\b";
            break;
        case '\f':
            replacement = "\\f";
            break;
        case '\n':
            replacement = "\\n";
            break;
        case '\r':
            replacement = "\\r";
            break;
        case '\t':
            replacement = "\\t";
            break;
        default:
            break;
        }
        if (replacement != nullptr)
        {
            result += replacement;
        }
        else if (static_cast<unsigned char>(ch) < 0x20)
        {
            // Remaining control characters: emit a \u00XX escape.
            char escBuf[7];
            snprintf(escBuf, sizeof(escBuf), "\\u%04x", ch);
            result += escBuf;
        }
        else
        {
            result += ch;
        }
    }
    return result;
}
static int extract_sdp(const char * json, char * sdp_buf, size_t sdp_buf_len)
{
    // Scans the tokenized JSON for an "sdp" key and copies its value (truncated
    // to fit, always NUL-terminated) into sdp_buf.
    // Returns 0 on success, -1 on bad arguments, parse failure or missing key.
    if (json == nullptr || sdp_buf == nullptr || sdp_buf_len == 0)
    {
        ChipLogError(Camera, "extract_sdp failed");
        return -1;
    }
    jsmn_parser parser;
    jsmntok_t tokens[64];
    jsmn_init(&parser);
    int ret = jsmn_parse(&parser, json, strlen(json), tokens, sizeof(tokens) / sizeof(tokens[0]));
    if (ret < 0)
    {
        printf("Failed to parse JSON: %d\n", ret);
        return -1;
    }
    static const char kKey[] = "sdp";
    const size_t kKeyLen     = strlen(kKey);
    // i + 1 < ret guarantees the value token following the key exists.
    for (int i = 1; i + 1 < ret; i++)
    {
        const size_t tokLen = static_cast<size_t>(tokens[i].end - tokens[i].start);
        // Require an exact-length match so a shorter key such as "sd" cannot
        // prefix-match the lookup via strncmp.
        if (tokens[i].type == JSMN_STRING && tokLen == kKeyLen && strncmp(json + tokens[i].start, kKey, kKeyLen) == 0)
        {
            size_t len = static_cast<size_t>(tokens[i + 1].end - tokens[i + 1].start);
            if (len >= sdp_buf_len)
                len = sdp_buf_len - 1; // truncate, leaving room for the NUL
            strncpy(sdp_buf, json + tokens[i + 1].start, len);
            sdp_buf[len] = '\0';
            return 0;
        }
    }
    return -1; // SDP not found
}
static int extract_candidate(const char * json, char * sdp_buf, size_t sdp_buf_len)
{
    // Scans the tokenized JSON for a "candidate" key and copies its value
    // (truncated to fit, always NUL-terminated) into sdp_buf.
    // Returns 0 on success, -1 on bad arguments, parse failure or missing key.
    if (json == nullptr || sdp_buf == nullptr || sdp_buf_len == 0)
    {
        ChipLogError(Camera, "extract_candidate failed");
        return -1;
    }
    jsmn_parser parser;
    jsmntok_t tokens[64];
    jsmn_init(&parser);
    int ret = jsmn_parse(&parser, json, strlen(json), tokens, sizeof(tokens) / sizeof(tokens[0]));
    if (ret < 0)
    {
        printf("Failed to parse JSON: %d\n", ret);
        return -1;
    }
    static const char kKey[] = "candidate";
    const size_t kKeyLen     = strlen(kKey);
    // i + 1 < ret guarantees the value token following the key exists.
    for (int i = 1; i + 1 < ret; i++)
    {
        const size_t tokLen = static_cast<size_t>(tokens[i].end - tokens[i].start);
        // Require an exact-length match so shorter keys cannot prefix-match
        // the lookup via strncmp.
        if (tokens[i].type == JSMN_STRING && tokLen == kKeyLen && strncmp(json + tokens[i].start, kKey, kKeyLen) == 0)
        {
            size_t len = static_cast<size_t>(tokens[i + 1].end - tokens[i + 1].start);
            if (len >= sdp_buf_len)
                len = sdp_buf_len - 1; // truncate, leaving room for the NUL
            strncpy(sdp_buf, json + tokens[i + 1].start, len);
            sdp_buf[len] = '\0';
            return 0;
        }
    }
    return -1; // Candidate not found
}
void webrtc_bridge_message_received_cb(void * data, int len)
{
// handle message
printf("Received Message from P4-Streamer: \n%.*s\n", len, (char *) data);
// Use nothrow to check for allocation failure
std::unique_ptr<signaling_msg_t> msg(new (std::nothrow) signaling_msg_t());
if (msg == nullptr)
{
ChipLogError(Camera, "webrtc_bridge_message_received_cb: failed to allocate signaling_msg_t");
return;
}
deserialize_signaling_message((const char *) data, len, msg.get());
char sdp_buf[gSDPLength];
switch (msg->messageType)
{
case SIGNALING_MSG_TYPE_OFFER:
if (extract_sdp(msg->payload, sdp_buf, sizeof(sdp_buf)) == 0)
{
ESP_LOGD(TAG, "Extracted SDP:\n%s\n", sdp_buf);
}
break;
case SIGNALING_MSG_TYPE_ANSWER:
if (extract_sdp(msg->payload, sdp_buf, sizeof(sdp_buf)) == 0)
{
ESP_LOGD(TAG, "Extracted SDP:\n%s\n", sdp_buf);
}
break;
case SIGNALING_MSG_TYPE_ICE_CANDIDATE:
if (extract_candidate(msg->payload, sdp_buf, sizeof(sdp_buf)) == 0)
{
ESP_LOGD(TAG, "Extracted Candidate:\n%s\n", sdp_buf);
}
break;
default:
ESP_LOGE(TAG, "Unknown message type\n");
goto cleanup;
break;
}
{
uint16_t sessionId = 0;
snprintf(peerClientId, sizeof(peerClientId), "%s", msg->peerClientId);
printf("Peer Client ID: \n%s\n", peerClientId);
sessionId = static_cast<uint16_t>(strtoul(peerClientId, nullptr, 0)); // base 0 auto-detects "0x"
printf("Session ID: %u\n", sessionId);
std::string unescaped_msg = json_unescape(std::string(sdp_buf));
ESP_LOGD(TAG, "unescaped msg: \n%s\n", unescaped_msg.c_str());
if (msg->messageType == SIGNALING_MSG_TYPE_OFFER)
{
WebRTCTransportProvider::Delegate & delegateRef = gCameraDevice.GetWebRTCProviderDelegate();
auto * webrtcMgr = static_cast<WebRTCProviderManager *>(&delegateRef);
if (webrtcMgr != nullptr)
{
WebrtcTransport * transport = webrtcMgr->GetTransport(sessionId);
if (transport != nullptr)
{
transport->OnLocalDescription(unescaped_msg, SDPType::Offer);
printf("Set SDP Offer to WebRTCProviderManager\n");
}
else
{
printf("Transport is not found for sessionID: %u\n", sessionId);
}
}
else
{
printf("Delegate is not of type WebRTCProviderManager\n");
}
}
else if (msg->messageType == SIGNALING_MSG_TYPE_ANSWER)
{
WebRTCTransportProvider::Delegate & delegateRef = gCameraDevice.GetWebRTCProviderDelegate();
auto * webrtcMgr = static_cast<WebRTCProviderManager *>(&delegateRef);
if (webrtcMgr != nullptr)
{
WebrtcTransport * transport = webrtcMgr->GetTransport(sessionId);
if (transport != nullptr)
{
transport->OnLocalDescription(unescaped_msg, SDPType::Answer);
printf("Set SDP Answer to WebRTCProviderManager\n");
}
else
{
printf("Transport is not found for sessionID: %u\n", sessionId);
}
printf("Set SDP Answer to WebRTCProviderManager\n");
}
else
{
printf("Delegate is not of type WebRTCProviderManager\n");
}
}
else if (msg->messageType == SIGNALING_MSG_TYPE_ICE_CANDIDATE)
{
WebRTCTransportProvider::Delegate & delegateRef = gCameraDevice.GetWebRTCProviderDelegate();
auto * webrtcMgr = static_cast<WebRTCProviderManager *>(&delegateRef);
if (webrtcMgr != nullptr)
{
WebrtcTransport * transport = webrtcMgr->GetTransport(sessionId);
if (transport != nullptr)
{
transport->OnICECandidate(unescaped_msg); // todo: session id based
printf("Set Candidate to WebRTCProviderManager\n");
}
else
{
printf("Transport is not found for sessionID: %u\n", sessionId);
}
printf("Set Candidate to WebRTCProviderManager\n");
}
else
{
printf("Delegate is not of type WebRTCProviderManager\n");
}
}
}
cleanup:
if (msg->payload)
{
free(msg->payload);
}
}
std::string generateMonotonicPeerConnectionId()
{
    // Produce a process-unique peer connection ID of the form "0x%08x", drawn
    // from a monotonically increasing atomic counter (first ID is 0x00000000).
    const unsigned int nextId = peerConnectionCounter.fetch_add(1, std::memory_order_relaxed);
    std::ostringstream idStream;
    idStream << "0x" << std::hex << std::setw(8) << std::setfill('0') << nextId;
    return idStream.str();
}
@@ -0,0 +1,24 @@
/*
*
* Copyright (c) 2025 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <string>
// Returns a process-unique peer connection ID formatted as "0x%08x",
// generated from a monotonically increasing atomic counter.
std::string generateMonotonicPeerConnectionId();
// Escapes a raw string so it can be embedded inside a JSON string literal.
std::string json_escape(const std::string & input);
// Callback invoked when a signaling message arrives from the WebRTC bridge;
// data points to len bytes of serialized signaling payload.
void webrtc_bridge_message_received_cb(void * data, int len);
@@ -0,0 +1,157 @@
#include "webrtc-abstract.h"
#include <lib/support/logging/CHIPLogging.h>
#include <webrtc-transport.h>
// Constructs an idle transport with all request arguments value-initialized.
WebrtcTransport::WebrtcTransport() {
  ChipLogProgress(Camera, "WebrtcTransport created");
  // Value-initialize every member of RequestArgs; the previous explicit
  // {0, 0, 0, 0, 0, 0} list covered only six fields and would silently
  // under-cover any field added to the struct later.
  mRequestArgs = {};
}
// Tears down the transport, releasing the peer connection reference first.
WebrtcTransport::~WebrtcTransport() {
  ClosePeerConnection();
  ChipLogProgress(Camera,
                  "WebrtcTransport destroyed for sessionID: [%u]",
                  mRequestArgs.sessionId);
}
// Stores the owner-supplied notification hooks; either callback may be empty.
void WebrtcTransport::SetCallbacks(
    OnTransportLocalDescriptionCallback localDescriptionCb,
    OnTransportConnectionStateCallback connectionStateCb) {
  mOnLocalDescription = localDescriptionCb;
  mOnConnectionState = connectionStateCb;
}
void WebrtcTransport::SetRequestArgs(const RequestArgs &args) {
mRequestArgs = args;
}
// Returns a mutable reference to the stored request arguments.
WebrtcTransport::RequestArgs &WebrtcTransport::GetRequestArgs() {
  return mRequestArgs;
}
// Maps the current signaling state to a printable name for logging.
const char *WebrtcTransport::GetStateStr() const {
  const char *stateName = "N/A";
  switch (mState) {
  case State::Idle:
    stateName = "Idle";
    break;
  case State::SendingOffer:
    stateName = "SendingOffer";
    break;
  case State::SendingAnswer:
    stateName = "SendingAnswer";
    break;
  case State::SendingICECandidates:
    stateName = "SendingICECandidates";
    break;
  case State::SendingEnd:
    stateName = "SendingEnd";
    break;
  }
  return stateName;
}
// Transitions to the target state; the log reflects the new state because
// mState is updated before GetStateStr() is called.
void WebrtcTransport::MoveToState(const State targetState) {
  mState = targetState;
  ChipLogProgress(Camera, "WebrtcTransport moving to [ %s ]", GetStateStr());
}
void WebrtcTransport::SetCommandType(const CommandType commandtype) {
mCommandType = commandtype;
}
// Creates the WebRTC peer connection (idempotent) and wires its events back
// into this transport's On* handlers.
void WebrtcTransport::Start() {
  if (mPeerConnection != nullptr) {
    ChipLogProgress(Camera, "Start, mPeerConnection is already created");
    return;
  }
  mPeerConnection = CreateWebRTCPeerConnection();
  auto localDescriptionCb = [this](const std::string &sdp, SDPType type) {
    this->OnLocalDescription(sdp, type);
  };
  auto candidateCb = [this](const std::string &candidate) {
    this->OnICECandidate(candidate);
  };
  auto connectionCb = [this](bool connected) {
    this->OnConnectionStateChanged(connected);
  };
  auto trackCb = [this](std::shared_ptr<WebRTCTrack> track) {
    this->OnTrack(track);
  };
  mPeerConnection->SetCallbacks(localDescriptionCb, candidateCb, connectionCb,
                                trackCb);
}
// Drops the transport's references to its media tracks. The peer connection
// itself is intentionally not closed here: KVSWebRTC close is handled by the
// KVSWebRTCManager.
void WebrtcTransport::Stop() {
  mVideoTrack.reset();
  mAudioTrack.reset();
}
// Attaches one video and one audio track to the peer connection, if any.
void WebrtcTransport::AddTracks() {
  if (mPeerConnection == nullptr) {
    return; // nothing to attach tracks to
  }
  mVideoTrack = mPeerConnection->AddTrack(MediaType::Video);
  mAudioTrack = mPeerConnection->AddTrack(MediaType::Audio);
}
// Replaces the transport's video track reference.
void WebrtcTransport::SetVideoTrack(std::shared_ptr<WebRTCTrack> videoTrack) {
  ChipLogProgress(Camera,
                  "Setting video track for sessionID: %u",
                  mRequestArgs.sessionId);
  mVideoTrack = videoTrack;
}
// Replaces the transport's audio track reference.
void WebrtcTransport::SetAudioTrack(std::shared_ptr<WebRTCTrack> audioTrack) {
  ChipLogProgress(Camera,
                  "Setting audio track for sessionID: %u",
                  mRequestArgs.sessionId);
  mAudioTrack = audioTrack;
}
void WebrtcTransport::AddRemoteCandidate(const std::string &candidate,
const std::string &mid) {
ChipLogProgress(Camera, "Adding remote candidate for sessionID: %u",
mRequestArgs.sessionId);
mPeerConnection->AddRemoteCandidate(candidate, mid);
}
// WebRTC Callbacks
// Caches the local SDP (served later via GetLocalDescription) and notifies
// the owner-supplied callback, if one is registered.
void WebrtcTransport::OnLocalDescription(const std::string &sdp, SDPType type) {
  ChipLogProgress(Camera, "Local description received for sessionID: %u",
                  mRequestArgs.sessionId);
  mLocalSdp = sdp;
  mLocalSdpType = type;
  if (mOnLocalDescription) {
    mOnLocalDescription(sdp, type, mRequestArgs.sessionId);
  }
}
// Reports whether a peer connection exists. The actual KVSWebRTC close is
// handled by the KVSWebRTCManager, so no teardown happens here.
bool WebrtcTransport::ClosePeerConnection() {
  return mPeerConnection != nullptr;
}
// Accumulates a locally-gathered ICE candidate for later retrieval via
// GetCandidates().
void WebrtcTransport::OnICECandidate(const std::string &candidate) {
  ChipLogProgress(Camera, "ICE Candidate received for sessionID: %u",
                  mRequestArgs.sessionId);
  mLocalCandidates.emplace_back(candidate);
  ChipLogProgress(Camera, "Local Candidate:");
  ChipLogProgress(Camera, "%s", candidate.c_str());
}
// Forwards connection state changes to the owner-supplied callback, if set.
void WebrtcTransport::OnConnectionStateChanged(bool connected) {
  ChipLogProgress(Camera, "Connection state changed for sessionID: %u",
                  mRequestArgs.sessionId);
  if (mOnConnectionState) {
    mOnConnectionState(connected, mRequestArgs.sessionId);
  }
}
// Routes a track announced by the remote peer to the matching setter based on
// its type string ("video" / "audio"); other types are logged and ignored.
void WebrtcTransport::OnTrack(std::shared_ptr<WebRTCTrack> track) {
  const std::string trackType = track->GetType();
  ChipLogProgress(Camera, "Track received for sessionID: %u, type: %s",
                  mRequestArgs.sessionId, trackType.c_str());
  if (trackType == "video") {
    ChipLogProgress(Camera, "Video track updated from remote peer");
    SetVideoTrack(track);
  } else if (trackType == "audio") {
    ChipLogProgress(Camera, "audio track updated from remote peer");
    SetAudioTrack(track);
  }
}
@@ -0,0 +1,112 @@
#pragma once
#include "webrtc-abstract.h"
#include <lib/core/DataModelTypes.h>
#include <lib/core/ScopedNodeId.h>
// Fired when a local SDP (offer/answer) is available for a session.
// NOTE(review): sessionId is int16_t here while RequestArgs::sessionId is
// uint16_t — confirm the intended signedness.
using OnTransportLocalDescriptionCallback = std::function<void(
    const std::string &sdp, SDPType type, const int16_t sessionId)>;
// Fired when the peer connection's connected/disconnected state changes.
using OnTransportConnectionStateCallback =
    std::function<void(bool connected, const int16_t sessionId)>;
// Wraps a single WebRTC peer connection for one provider session: owns the
// connection and its media tracks, caches the local SDP and gathered ICE
// candidates, and forwards events to owner-supplied callbacks.
class WebrtcTransport {
public:
  // Which WebRTC Transport Provider command this transport is servicing.
  enum class CommandType : uint8_t {
    kUndefined = 0,
    kOffer = 1,
    kAnswer = 2,
    kICECandidates = 3,
    kEnd = 4,
  };
  enum class State : uint8_t {
    Idle, ///< Default state, no communication initiated yet
    SendingOffer, ///< Sending Offer command from camera
    SendingAnswer, ///< Sending Answer command from camera
    SendingICECandidates, ///< Sending ICECandidates command from camera
    SendingEnd, ///< Sending End command from camera
  };
  // Identifiers describing the session this transport serves and the Matter
  // peer that requested it.
  struct RequestArgs {
    uint16_t sessionId;
    uint16_t videoStreamId;
    uint16_t audioStreamId;
    chip::NodeId peerNodeId;
    chip::FabricIndex fabricIndex;
    chip::EndpointId originatingEndpointId;
    chip::ScopedNodeId peerId;
  };
  WebrtcTransport();
  ~WebrtcTransport();
  // Registers owner callbacks for local-description and connection events.
  void SetCallbacks(OnTransportLocalDescriptionCallback onLocalDescription,
                    OnTransportConnectionStateCallback onConnectionState);
  // Updates mState and logs the transition.
  void MoveToState(const State targetState);
  // Printable name of the current state (for logging).
  const char *GetStateStr() const;
  State GetState() { return mState; }
  // Takes care of creation WebRTC peer connection and registering the necessary
  // callbacks
  void Start();
  // Stops WebRTC peer connection and cleanup
  void Stop();
  // Adds one video and one audio track to the peer connection.
  void AddTracks();
  // Set video track for the transport
  void SetVideoTrack(std::shared_ptr<WebRTCTrack> videoTrack);
  // Set audio track for the transport
  void SetAudioTrack(std::shared_ptr<WebRTCTrack> audioTrack);
  std::shared_ptr<WebRTCPeerConnection> GetPeerConnection() {
    return mPeerConnection;
  }
  // Accessors for the cached local SDP and ICE candidates.
  std::string GetLocalDescription() { return mLocalSdp; }
  void SetSdpAnswer(std::string localSdp) { mLocalSdp = localSdp; }
  std::vector<std::string> GetCandidates() { return mLocalCandidates; }
  void SetCandidates(std::vector<std::string> candidates) {
    mLocalCandidates = candidates;
  }
  // Forwards a remote ICE candidate (with its media-stream id) to the peer
  // connection.
  void AddRemoteCandidate(const std::string &candidate, const std::string &mid);
  // Returns whether a peer connection exists; actual close is handled
  // elsewhere (see the .cpp).
  bool ClosePeerConnection();
  void SetCommandType(const CommandType commandtype);
  CommandType GetCommandType() { return mCommandType; }
  // WebRTC Callbacks
  void OnLocalDescription(const std::string &sdp, SDPType type);
  void OnICECandidate(const std::string &candidate);
  void OnConnectionStateChanged(bool connected);
  void OnTrack(std::shared_ptr<WebRTCTrack> track);
  void SetRequestArgs(const RequestArgs &args);
  RequestArgs &GetRequestArgs();
private:
  CommandType mCommandType = CommandType::kUndefined;
  State mState = State::Idle;
  // Peer connection and the tracks attached to (or received from) it.
  std::shared_ptr<WebRTCPeerConnection> mPeerConnection;
  std::shared_ptr<WebRTCTrack> mVideoTrack;
  std::shared_ptr<WebRTCTrack> mAudioTrack;
  // Cached local session description and gathered ICE candidates.
  std::string mLocalSdp;
  SDPType mLocalSdpType;
  std::vector<std::string> mLocalCandidates;
  RequestArgs mRequestArgs;
  // Owner-supplied event hooks (may be empty).
  OnTransportLocalDescriptionCallback mOnLocalDescription = nullptr;
  OnTransportConnectionStateCallback mOnConnectionState = nullptr;
};
+10
View File
@@ -0,0 +1,10 @@
# Name, Type, SubType, Offset, Size, Flags
# Note: Firmware partition offset needs to be 64K aligned, initial 36K (9 sectors) are reserved for bootloader and partition table
esp_secure_cert, 0x3F, ,0xd000, 0x2000, encrypted
nvs, data, nvs, 0x10000, 0xC000,
nvs_keys, data, nvs_keys,, 0x1000, encrypted
otadata, data, ota, , 0x2000
phy_init, data, phy, , 0x1000,
ota_0, app, ota_0, 0x20000, 0x2E0000,
#ota_1, app, ota_1, 0x200000, 0x1E0000,
fctry, data, nvs, 0x3E0000, 0x6000
1 # Name, Type, SubType, Offset, Size, Flags
2 # Note: Firmware partition offset needs to be 64K aligned, initial 36K (9 sectors) are reserved for bootloader and partition table
3 esp_secure_cert, 0x3F, ,0xd000, 0x2000, encrypted
4 nvs, data, nvs, 0x10000, 0xC000,
5 nvs_keys, data, nvs_keys,, 0x1000, encrypted
6 otadata, data, ota, , 0x2000
7 phy_init, data, phy, , 0x1000,
8 ota_0, app, ota_0, 0x20000, 0x2E0000,
9 #ota_1, app, ota_1, 0x200000, 0x1E0000,
10 fctry, data, nvs, 0x3E0000, 0x6000
+288
View File
@@ -0,0 +1,288 @@
CONFIG_ESPTOOLPY_FLASHSIZE_4MB=y
#enable BT
CONFIG_BT_ENABLED=y
CONFIG_BT_NIMBLE_ENABLED=y
#disable BT connection reattempt
CONFIG_BT_NIMBLE_ENABLE_CONN_REATTEMPT=n
#enable lwip ipv6 autoconfig
CONFIG_LWIP_IPV6_AUTOCONFIG=y
# Use a custom partition table
CONFIG_PARTITION_TABLE_CUSTOM=y
CONFIG_PARTITION_TABLE_FILENAME="partitions.csv"
CONFIG_PARTITION_TABLE_OFFSET=0xC000
# Enable chip shell
CONFIG_ENABLE_CHIP_SHELL=y
CONFIG_ENABLE_WIFI_AP=n
# Enable sntp time sync
CONFIG_ENABLE_SNTP_TIME_SYNC=y
#enable lwIP route hooks
CONFIG_LWIP_HOOK_IP6_ROUTE_DEFAULT=y
CONFIG_LWIP_HOOK_ND6_GET_GW_DEFAULT=y
# Button
CONFIG_BUTTON_PERIOD_TIME_MS=20
CONFIG_BUTTON_LONG_PRESS_TIME_MS=5000
# disable softap by default
CONFIG_ESP_WIFI_SOFTAP_SUPPORT=n
# Enable OTA Requestor
CONFIG_ENABLE_OTA_REQUESTOR=y
# Enable HKDF in mbedtls
CONFIG_MBEDTLS_HKDF_C=y
# Increase LwIP IPv6 address number to 6 (MAX_FABRIC + 1)
# unique local addresses for fabrics(MAX_FABRIC), a link local address(1)
CONFIG_LWIP_IPV6_NUM_ADDRESSES=6
# Exclude unused clusters to optimize flash and memory usage
CONFIG_SUPPORT_ACCOUNT_LOGIN_CLUSTER=n
CONFIG_SUPPORT_ACTIVATED_CARBON_FILTER_MONITORING_CLUSTER=n
CONFIG_SUPPORT_AIR_QUALITY_CLUSTER=n
CONFIG_SUPPORT_APPLICATION_BASIC_CLUSTER=n
CONFIG_SUPPORT_APPLICATION_LAUNCHER_CLUSTER=n
CONFIG_SUPPORT_AUDIO_OUTPUT_CLUSTER=n
CONFIG_SUPPORT_BOOLEAN_STATE_CONFIGURATION_CLUSTER=n
CONFIG_SUPPORT_BRIDGED_DEVICE_BASIC_INFORMATION_CLUSTER=n
CONFIG_SUPPORT_CARBON_DIOXIDE_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_CARBON_MONOXIDE_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_CHANNEL_CLUSTER=n
CONFIG_SUPPORT_CHIME_CLUSTER=n
CONFIG_SUPPORT_COMMISSIONER_CONTROL_CLUSTER=n
CONFIG_SUPPORT_CONTENT_LAUNCHER_CLUSTER=n
CONFIG_SUPPORT_CONTENT_CONTROL_CLUSTER=n
CONFIG_SUPPORT_CONTENT_APP_OBSERVER_CLUSTER=n
CONFIG_SUPPORT_DEVICE_ENERGY_MANAGEMENT_CLUSTER=n
CONFIG_SUPPORT_DEVICE_ENERGY_MANAGEMENT_MODE_CLUSTER=n
CONFIG_SUPPORT_DIAGNOSTIC_LOGS_CLUSTER=n
CONFIG_SUPPORT_DISHWASHER_ALARM_CLUSTER=n
CONFIG_SUPPORT_DISHWASHER_MODE_CLUSTER=n
CONFIG_SUPPORT_MICROWAVE_OVEN_MODE_CLUSTER=n
CONFIG_SUPPORT_DOOR_LOCK_CLUSTER=n
CONFIG_SUPPORT_ECOSYSTEM_INFORMATION_CLUSTER=n
CONFIG_SUPPORT_ELECTRICAL_ENERGY_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_ELECTRICAL_POWER_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_ENERGY_EVSE_CLUSTER=n
CONFIG_SUPPORT_ENERGY_EVSE_MODE_CLUSTER=n
CONFIG_SUPPORT_ENERGY_PREFERENCE_CLUSTER=n
CONFIG_SUPPORT_FAN_CONTROL_CLUSTER=n
CONFIG_SUPPORT_FAULT_INJECTION_CLUSTER=n
CONFIG_SUPPORT_FIXED_LABEL_CLUSTER=n
CONFIG_SUPPORT_FORMALDEHYDE_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_HEPA_FILTER_MONITORING_CLUSTER=n
CONFIG_SUPPORT_ICD_MANAGEMENT_CLUSTER=n
CONFIG_SUPPORT_KEYPAD_INPUT_CLUSTER=n
CONFIG_SUPPORT_LAUNDRY_WASHER_MODE_CLUSTER=n
CONFIG_SUPPORT_LOCALIZATION_CONFIGURATION_CLUSTER=n
CONFIG_SUPPORT_LOW_POWER_CLUSTER=n
CONFIG_SUPPORT_MEDIA_INPUT_CLUSTER=n
CONFIG_SUPPORT_MEDIA_PLAYBACK_CLUSTER=n
CONFIG_SUPPORT_MICROWAVE_OVEN_CONTROL_CLUSTER=n
CONFIG_SUPPORT_MESSAGES_CLUSTER=n
CONFIG_SUPPORT_MODE_SELECT_CLUSTER=n
CONFIG_SUPPORT_NITROGEN_DIOXIDE_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_SAMPLE_MEI_CLUSTER=n
CONFIG_SUPPORT_OCCUPANCY_SENSING_CLUSTER=n
CONFIG_SUPPORT_POWER_TOPOLOGY_CLUSTER=n
CONFIG_SUPPORT_OPERATIONAL_STATE_CLUSTER=n
CONFIG_SUPPORT_OPERATIONAL_STATE_OVEN_CLUSTER=n
CONFIG_SUPPORT_OPERATIONAL_STATE_RVC_CLUSTER=n
CONFIG_SUPPORT_OVEN_MODE_CLUSTER=n
CONFIG_SUPPORT_OZONE_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_PM10_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_PM1_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_PM2_5_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_POWER_SOURCE_CLUSTER=n
CONFIG_SUPPORT_POWER_SOURCE_CONFIGURATION_CLUSTER=n
CONFIG_SUPPORT_PUMP_CONFIGURATION_AND_CONTROL_CLUSTER=n
CONFIG_SUPPORT_RADON_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_REFRIGERATOR_ALARM_CLUSTER=n
CONFIG_SUPPORT_REFRIGERATOR_AND_TEMPERATURE_CONTROLLED_CABINET_MODE_CLUSTER=n
CONFIG_SUPPORT_RVC_CLEAN_MODE_CLUSTER=n
CONFIG_SUPPORT_RVC_RUN_MODE_CLUSTER=n
CONFIG_SUPPORT_SERVICE_AREA_CLUSTER=n
CONFIG_SUPPORT_SMOKE_CO_ALARM_CLUSTER=n
CONFIG_SUPPORT_SOFTWARE_DIAGNOSTICS_CLUSTER=n
CONFIG_SUPPORT_SWITCH_CLUSTER=n
CONFIG_SUPPORT_TARGET_NAVIGATOR_CLUSTER=n
CONFIG_SUPPORT_TEMPERATURE_CONTROL_CLUSTER=n
CONFIG_SUPPORT_THERMOSTAT_CLUSTER=n
CONFIG_SUPPORT_THERMOSTAT_USER_INTERFACE_CONFIGURATION_CLUSTER=n
CONFIG_SUPPORT_THREAD_BORDER_ROUTER_MANAGEMENT_CLUSTER=n
CONFIG_SUPPORT_THREAD_NETWORK_DIRECTORY_CLUSTER=n
CONFIG_SUPPORT_TIME_FORMAT_LOCALIZATION_CLUSTER=n
CONFIG_SUPPORT_TIME_SYNCHRONIZATION_CLUSTER=y
CONFIG_SUPPORT_TIMER_CLUSTER=n
CONFIG_SUPPORT_TVOC_CONCENTRATION_MEASUREMENT_CLUSTER=n
CONFIG_SUPPORT_UNIT_TESTING_CLUSTER=n
CONFIG_SUPPORT_USER_LABEL_CLUSTER=n
CONFIG_SUPPORT_VALVE_CONFIGURATION_AND_CONTROL_CLUSTER=n
CONFIG_SUPPORT_WAKE_ON_LAN_CLUSTER=n
CONFIG_SUPPORT_LAUNDRY_WASHER_CONTROLS_CLUSTER=n
CONFIG_SUPPORT_LAUNDRY_DRYER_CONTROLS_CLUSTER=n
CONFIG_SUPPORT_WIFI_NETWORK_MANAGEMENT_CLUSTER=n
CONFIG_SUPPORT_WINDOW_COVERING_CLUSTER=n
CONFIG_SUPPORT_WATER_HEATER_MANAGEMENT_CLUSTER=n
CONFIG_SUPPORT_WATER_HEATER_MODE_CLUSTER=n
CONFIG_SUPPORT_CLOSURE_DIMENSION_CLUSTER=n
CONFIG_SUPPORT_CLOSURE_CONTROL_CLUSTER=n
CONFIG_SUPPORT_COMMODITY_TARIFF_CLUSTER=n
# Signaling-specific config
# Signalling only defaults to C6
CONFIG_IDF_TARGET="esp32c6"
CONFIG_IDF_TARGET_ESP32C6=y
#
# Serial flasher config
#
CONFIG_ESPTOOLPY_FLASHMODE_QIO=y
CONFIG_ESPTOOLPY_FLASHFREQ_80M=y
CONFIG_ESPTOOLPY_FLASHSIZE_4MB=y
#
# Partition Table
#
#CONFIG_PARTITION_TABLE_CUSTOM=y
#CONFIG_PARTITION_TABLE_CUSTOM_FILENAME="webrtc_example.csv"
#CONFIG_PARTITION_TABLE_FILENAME="webrtc_example.csv"
#CONFIG_PARTITION_TABLE_OFFSET=0x8000
#
# Example Configuration
#
CONFIG_ESP_WIFI_SSID="myssid"
CONFIG_ESP_WIFI_PASSWORD="mypassword"
CONFIG_ESP_MAXIMUM_RETRY=5
CONFIG_EXAMPLE_SDMMC_BUS_WIDTH_1=y
CONFIG_EXAMPLE_PIN_CMD=35
CONFIG_EXAMPLE_PIN_CLK=36
CONFIG_EXAMPLE_PIN_D0=37
#
# ESP32-Specific
#
CONFIG_ESP32_DEFAULT_CPU_FREQ_240=y
CONFIG_ESP32_DEFAULT_CPU_FREQ_MHZ=240
#
# SPI RAM config
#
CONFIG_SPIRAM=y
CONFIG_SPIRAM_SUPPORT=y
CONFIG_ESP32_SPIRAM_SUPPORT=y
CONFIG_WIFI_LWIP_ALLOCATION_FROM_SPIRAM_FIRST=y
CONFIG_SPIRAM_USE_MALLOC=y
CONFIG_SPIRAM_MALLOC_ALWAYSINTERNAL=0
CONFIG_SPIRAM_TRY_ALLOCATE_WIFI_LWIP=y
CONFIG_SPIRAM_MALLOC_RESERVE_INTERNAL=32768
CONFIG_SPIRAM_ALLOW_BSS_SEG_EXTERNAL_MEMORY=y
CONFIG_SPIRAM_BANKSWITCH_ENABLE=y
CONFIG_SPIRAM_BANKSWITCH_RESERVE=8
CONFIG_SPIRAM_ALLOW_STACK_EXTERNAL_MEMORY=y
CONFIG_SPIRAM_SPEED_80M=y
# 1. The SPIRAM test is only needed for custom SPIRAM
# 2. Needed only in development phase
# Disabling this save us around 1 seconds at startup
CONFIG_SPIRAM_MEMTEST=n
CONFIG_ESP32_WIFI_STATIC_TX_BUFFER=y
CONFIG_ESP32_WIFI_TX_BUFFER_TYPE=0
CONFIG_ESP32_WIFI_STATIC_TX_BUFFER_NUM=16
CONFIG_FATFS_LFN_HEAP=y
CONFIG_FATFS_MAX_LFN=255
CONFIG_FATFS_API_ENCODING_ANSI_OEM=y
CONFIG_FATFS_FS_LOCK=10
CONFIG_FATFS_ALLOC_PREFER_EXTRAM=y
CONFIG_LOG_DEFAULT_LEVEL=3
#
# SNTP
#
CONFIG_LWIP_SNTP_MAX_SERVERS=3
#CONFIG_LWIP_SO_LINGER=y
#CONFIG_LWIP_SO_RCVBUF=y
#CONFIG_LWIP_NETBUF_RECVINFO=y
#CONFIG_LWIP_IP_FRAG=y
#CONFIG_LWIP_STATS=y
#CONFIG_LWIP_MULTICAST_PING=y
#CONFIG_LWIP_BROADCAST_PING=y
CONFIG_LWIP_DHCP_MAX_NTP_SERVERS=1
CONFIG_MBEDTLS_EXTERNAL_MEM_ALLOC=y
CONFIG_MBEDTLS_DEBUG=y
CONFIG_MBEDTLS_DEBUG_LEVEL_WARN=y
CONFIG_MBEDTLS_DEBUG_LEVEL=1
#CONFIG_SEMIHOSTFS_MAX_MOUNT_POINTS=1
#CONFIG_SEMIHOSTFS_HOST_PATH_MAX_LEN=128
CONFIG_MBEDTLS_SSL_PROTO_DTLS=y
CONFIG_MBEDTLS_SSL_ALPN=y
#
# ESP Ringbuf
#
#CONFIG_RINGBUF_PLACE_FUNCTIONS_INTO_FLASH=y
#CONFIG_RINGBUF_PLACE_ISR_FUNCTIONS_INTO_FLASH=y
#
# FreeRTOS
#
CONFIG_FREERTOS_PLACE_SNAPSHOT_FUNS_INTO_FLASH=y
#
# ESP System Settings
#
CONFIG_ESP_SYSTEM_ALLOW_RTC_FAST_MEM_AS_HEAP=y
#
# PThreads
#
CONFIG_PTHREAD_TASK_STACK_SIZE_DEFAULT=4096
CONFIG_PTHREAD_STACK_MIN=4096
CONFIG_MAIN_TASK_STACK_SIZE=12000
CONFIG_IPC_TASK_STACK_SIZE=1024
CONFIG_TIMER_TASK_STACK_SIZE=3584
CONFIG_MB_SERIAL_TASK_STACK_SIZE=2048
CONFIG_SUPPORT_STATIC_ALLOCATION=y
#
# Wi-Fi
#
# CONFIG_ESP32_WIFI_STATIC_RX_BUFFER_NUM=16
CONFIG_ESP32_WIFI_IRAM_OPT=n
CONFIG_ESP32_WIFI_RX_IRAM_OPT=n
CONFIG_ESP_SYSTEM_EVENT_TASK_STACK_SIZE=4000
CONFIG_DIAG_USE_EXTERNAL_LOG_WRAP=y
#
# TCP
#
CONFIG_ENABLE_TCP_TRANSPORT=y
CONFIG_MAX_LARGE_BUFFER_SIZE_BYTES=5120
@@ -0,0 +1,37 @@
CONFIG_IDF_TARGET="esp32c6"
# Signaling-specific config
# Slave
CONFIG_SLAVE_LWIP_ENABLED=y
CONFIG_LWIP_TCP_LOCAL_PORT_RANGE_START=61440
CONFIG_LWIP_TCP_LOCAL_PORT_RANGE_END=65535
CONFIG_LWIP_UDP_LOCAL_PORT_RANGE_START=61440
CONFIG_LWIP_UDP_LOCAL_PORT_RANGE_END=65535
CONFIG_ESP_DEFAULT_LWIP_SLAVE=y
CONFIG_FREERTOS_USE_TRACE_FACILITY=y
CONFIG_FREERTOS_USE_STATS_FORMATTING_FUNCTIONS=y
# CONFIG_FREERTOS_USE_LIST_DATA_INTEGRITY_CHECK_BYTES is not set
CONFIG_FREERTOS_VTASKLIST_INCLUDE_COREID=y
CONFIG_FREERTOS_GENERATE_RUN_TIME_STATS=y
# 4K is sufficient for signalling_only
CONFIG_MAIN_TASK_STACK_SIZE=4000
# OS
CONFIG_FREERTOS_HZ=1000
CONFIG_FREERTOS_PLACE_FUNCTIONS_INTO_FLASH=y
CONFIG_ESP_WIFI_EXTRA_IRAM_OPT=n
CONFIG_LWIP_TCPIP_RECVMBOX_SIZE=24
CONFIG_LWIP_TCP_OVERSIZE_QUARTER_MSS=y
CONFIG_LWIP_TCP_ACCEPTMBOX_SIZE=4
CONFIG_ESP_WEBRTC_BRIDGE_HOSTED=y
# DANGER: Do not touch me!!!
# CONFIG_HOSTED_ON_LOW_MEM=y
CONFIG_ESP_CACHE_MALLOC=y
CONFIG_ESP_PKT_STATS=y