mirror of
https://github.com/espressif/esp-matter.git
synced 2026-04-27 19:13:13 +00:00
Merge branch 'controller/more_clusters_support' into 'main'
controller: Add support for Identify, Scenes, Thermostat, and DoorLock cluster See merge request app-frameworks/esp-matter!481
This commit is contained in:
@@ -50,7 +50,8 @@ esp_err_t set_command_callback(command_callback_t callback, group_command_callba
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
void esp_matter_connection_success_callback(void *context, ExchangeManager &exchangeMgr, const SessionHandle &sessionHandle)
|
||||
void esp_matter_connection_success_callback(void *context, ExchangeManager &exchangeMgr,
|
||||
const SessionHandle &sessionHandle)
|
||||
{
|
||||
command_handle_t *cmd_handle = static_cast<command_handle_t *>(context);
|
||||
if (!cmd_handle) {
|
||||
@@ -191,7 +192,7 @@ static void send_command_success_callback(void *context, const chip::app::DataMo
|
||||
|
||||
static void send_command_failure_callback(void *context, CHIP_ERROR error)
|
||||
{
|
||||
ESP_LOGI(TAG, "FSend command failure");
|
||||
ESP_LOGI(TAG, "Send command failure: err: %" CHIP_ERROR_FORMAT, error.Format());
|
||||
}
|
||||
#if CONFIG_ESP_MATTER_ENABLE_DATA_MODEL
|
||||
namespace on_off {
|
||||
@@ -961,13 +962,28 @@ esp_err_t group_send_identify(uint8_t fabric_index, uint16_t group_id, uint16_t
|
||||
chip::Controller::InvokeGroupCommandRequest(&exchange_mgr, fabric_index, group_id, command_data);
|
||||
return ESP_OK;
|
||||
}
|
||||
|
||||
/// Invokes the Identify cluster's TriggerEffect command on the given remote endpoint.
/// The raw `effect_identifier`/`effect_variant` values are forwarded as the
/// corresponding Identify enums without validation.
esp_err_t send_trigger_effect(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t effect_identifier,
                              uint8_t effect_variant)
{
    // Build the request payload from the raw byte values.
    Identify::Commands::TriggerEffect::Type request;
    request.effectVariant = Identify::EffectVariantEnum(effect_variant);
    request.effectIdentifier = Identify::EffectIdentifierEnum(effect_identifier);

    // Invoke over the device's secure session; only the shared callbacks are used.
    chip::Controller::IdentifyCluster identify_cluster(*remote_device->GetExchangeManager(),
                                                       remote_device->GetSecureSession().Value(), remote_endpoint_id);
    identify_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
} // namespace command
|
||||
} // namespace identify
|
||||
|
||||
namespace group_key_management {
|
||||
namespace command {
|
||||
|
||||
esp_err_t send_keyset_write(peer_device_t *remote_device, uint16_t remote_endpoint_id, group_keyset_struct group_keyset)
|
||||
esp_err_t send_keyset_write(peer_device_t *remote_device, uint16_t remote_endpoint_id,
|
||||
group_keyset_struct &group_keyset)
|
||||
{
|
||||
GroupKeyManagement::Commands::KeySetWrite::Type command_data;
|
||||
command_data.groupKeySet = group_keyset;
|
||||
@@ -1036,6 +1052,436 @@ esp_err_t send_remove_group(peer_device_t *remote_device, uint16_t remote_endpoi
|
||||
} // namespace command
|
||||
} // namespace groups
|
||||
|
||||
namespace scenes {
|
||||
namespace command {
|
||||
|
||||
/**
 * Invokes the Scenes cluster's AddScene command on the given remote endpoint.
 *
 * @param scene_name   Optional NUL-terminated scene name; truncated to 16
 *                     characters. May be NULL, in which case an empty name is sent.
 * @param efs          Extension field sets forwarded verbatim in the request.
 * @param add_scene_cb Success callback receiving the AddScene response.
 */
esp_err_t send_add_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id, uint8_t scene_id,
                         uint16_t transition_time, char *scene_name, extension_field_sets &efs,
                         add_scene_callback add_scene_cb)
{
    Scenes::Commands::AddScene::Type command_data;
    command_data.groupID = group_id;
    command_data.sceneID = scene_id;
    command_data.transitionTime = transition_time;
    // Guard against NULL before strnlen(): passing NULL to strnlen is undefined
    // behavior. A NULL name is sent as an empty CharSpan instead.
    command_data.sceneName = scene_name ? chip::CharSpan(scene_name, strnlen(scene_name, 16)) : chip::CharSpan();
    command_data.extensionFieldSets = efs;

    chip::Controller::ScenesCluster cluster(*remote_device->GetExchangeManager(),
                                            remote_device->GetSecureSession().Value(), remote_endpoint_id);
    cluster.InvokeCommand(command_data, NULL, add_scene_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Scenes cluster's ViewScene command; the response is delivered
/// to `view_scene_cb`, failures to the shared failure callback.
esp_err_t send_view_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
                          uint8_t scene_id, view_scene_callback view_scene_cb)
{
    // Request identifies the scene by (group, scene) pair.
    Scenes::Commands::ViewScene::Type request;
    request.sceneID = scene_id;
    request.groupID = group_id;

    chip::Controller::ScenesCluster scenes_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    scenes_cluster.InvokeCommand(request, nullptr, view_scene_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Scenes cluster's RemoveScene command; the response is delivered
/// to `remove_scene_cb`.
esp_err_t send_remove_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
                            uint8_t scene_id, remove_scene_callback remove_scene_cb)
{
    // Request identifies the scene to remove by (group, scene) pair.
    Scenes::Commands::RemoveScene::Type request;
    request.sceneID = scene_id;
    request.groupID = group_id;

    chip::Controller::ScenesCluster scenes_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    scenes_cluster.InvokeCommand(request, nullptr, remove_scene_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Scenes cluster's RemoveAllScenes command for the given group.
esp_err_t send_remove_all_scenes(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
                                 remove_all_scenes_callback remove_all_scenes_cb)
{
    // Only the group id is carried in this request.
    Scenes::Commands::RemoveAllScenes::Type request;
    request.groupID = group_id;

    chip::Controller::ScenesCluster scenes_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    scenes_cluster.InvokeCommand(request, nullptr, remove_all_scenes_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Scenes cluster's StoreScene command; the response is delivered
/// to `store_scene_cb`.
esp_err_t send_store_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
                           uint8_t scene_id, store_scene_callback store_scene_cb)
{
    // Request identifies where to store by (group, scene) pair.
    Scenes::Commands::StoreScene::Type request;
    request.sceneID = scene_id;
    request.groupID = group_id;

    chip::Controller::ScenesCluster scenes_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    scenes_cluster.InvokeCommand(request, nullptr, store_scene_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Scenes cluster's RecallScene command. RecallScene has no
/// data response, so only the shared success/failure callbacks are used.
esp_err_t send_recall_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
                            uint8_t scene_id)
{
    Scenes::Commands::RecallScene::Type request;
    request.sceneID = scene_id;
    request.groupID = group_id;

    chip::Controller::ScenesCluster scenes_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    scenes_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Scenes cluster's GetSceneMembership command for the given group;
/// the response is delivered to `get_scene_membership_cb`.
esp_err_t send_get_scene_membership(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
                                    get_scene_membership_callback get_scene_membership_cb)
{
    // Only the group id is carried in this request.
    Scenes::Commands::GetSceneMembership::Type request;
    request.groupID = group_id;

    chip::Controller::ScenesCluster scenes_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    scenes_cluster.InvokeCommand(request, nullptr, get_scene_membership_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
} // namespace command
|
||||
} // namespace scenes
|
||||
|
||||
namespace thermostat {
|
||||
namespace command {
|
||||
|
||||
/// Invokes the Thermostat cluster's SetpointRaiseLower command. The raw `mode`
/// byte is forwarded as the SetpointAdjustMode enum without validation.
esp_err_t send_setpoint_raise_lower(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t mode,
                                    uint8_t amount)
{
    Thermostat::Commands::SetpointRaiseLower::Type request;
    request.amount = amount;
    request.mode = chip::app::Clusters::Thermostat::SetpointAdjustMode(mode);

    chip::Controller::ThermostatCluster thermostat_cluster(*remote_device->GetExchangeManager(),
                                                           remote_device->GetSecureSession().Value(),
                                                           remote_endpoint_id);
    thermostat_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Thermostat cluster's SetWeeklySchedule command.
/// `day_of_week_for_seq` and `mode_for_seq` are raw bitmap bytes applied via
/// SetRaw(); `trans` is the transition list forwarded verbatim.
esp_err_t send_set_weekly_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id,
                                   uint8_t num_of_tras_for_seq, uint8_t day_of_week_for_seq, uint8_t mode_for_seq,
                                   transitions &trans)
{
    Thermostat::Commands::SetWeeklySchedule::Type request;
    request.transitions = trans;
    request.numberOfTransitionsForSequence = num_of_tras_for_seq;
    // The day-of-week and mode fields are bitmaps; install the raw bytes directly.
    request.dayOfWeekForSequence.SetRaw(day_of_week_for_seq);
    request.modeForSequence.SetRaw(mode_for_seq);

    chip::Controller::ThermostatCluster thermostat_cluster(*remote_device->GetExchangeManager(),
                                                           remote_device->GetSecureSession().Value(),
                                                           remote_endpoint_id);
    thermostat_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Thermostat cluster's GetWeeklySchedule command; the response is
/// delivered to `get_weekly_schedule_cb`.
esp_err_t send_get_weekly_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t day_to_return,
                                   uint8_t mode_to_return, get_weekly_schedule_callback get_weekly_schedule_cb)
{
    Thermostat::Commands::GetWeeklySchedule::Type request;
    // Both selector fields are bitmaps; install the raw bytes directly.
    request.modeToReturn.SetRaw(mode_to_return);
    request.daysToReturn.SetRaw(day_to_return);

    chip::Controller::ThermostatCluster thermostat_cluster(*remote_device->GetExchangeManager(),
                                                           remote_device->GetSecureSession().Value(),
                                                           remote_endpoint_id);
    thermostat_cluster.InvokeCommand(request, nullptr, get_weekly_schedule_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the Thermostat cluster's ClearWeeklySchedule command (no payload fields).
esp_err_t send_clear_weekly_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id)
{
    // The command carries no arguments; a default-constructed payload suffices.
    Thermostat::Commands::ClearWeeklySchedule::Type request;

    chip::Controller::ThermostatCluster thermostat_cluster(*remote_device->GetExchangeManager(),
                                                           remote_device->GetSecureSession().Value(),
                                                           remote_endpoint_id);
    thermostat_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
} // namespace command
|
||||
} // namespace thermostat
|
||||
|
||||
namespace door_lock {
|
||||
namespace command {
|
||||
|
||||
/// Invokes the DoorLock cluster's LockDoor command as a timed invoke with the
/// given timeout (milliseconds).
esp_err_t send_lock_door(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t timed_invoke_timeout_ms)
{
    DoorLock::Commands::LockDoor::Type request;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    // Pass the timeout so the SDK performs a timed invoke.
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback,
                               chip::MakeOptional(timed_invoke_timeout_ms));
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's UnlockDoor command as a timed invoke with the
/// given timeout (milliseconds).
esp_err_t send_unlock_door(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t timed_invoke_timeout_ms)
{
    DoorLock::Commands::UnlockDoor::Type request;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    // Pass the timeout so the SDK performs a timed invoke.
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback,
                               chip::MakeOptional(timed_invoke_timeout_ms));
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's UnlockWithTimeout command as a timed invoke.
/// `timeout` is the command's relock timeout field; `timed_invoke_timeout_ms`
/// is the invoke-level timed-interaction timeout.
esp_err_t send_unlock_with_timeout(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t timeout,
                                   uint16_t timed_invoke_timeout_ms)
{
    DoorLock::Commands::UnlockWithTimeout::Type request;
    request.timeout = timeout;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback,
                               chip::MakeOptional(timed_invoke_timeout_ms));
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's SetWeekDaySchedule command.
/// `days_mask` is a raw bitmap byte applied via SetRaw(); start/end hour and
/// minute describe the schedule window for the given user/schedule index.
esp_err_t send_set_week_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t week_day_index,
                                     uint16_t user_index, uint8_t days_mask, uint8_t start_hour, uint8_t start_minute,
                                     uint8_t end_hour, uint8_t end_minute)
{
    DoorLock::Commands::SetWeekDaySchedule::Type request;
    request.userIndex = user_index;
    request.weekDayIndex = week_day_index;
    request.daysMask.SetRaw(days_mask);
    // Schedule window, end after start is the caller's responsibility.
    request.startHour = start_hour;
    request.startMinute = start_minute;
    request.endHour = end_hour;
    request.endMinute = end_minute;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's GetWeekDaySchedule command; the response is
/// delivered to `success_cb`.
esp_err_t send_get_week_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t week_day_index,
                                     uint16_t user_index, get_week_day_schedule_callback success_cb)
{
    DoorLock::Commands::GetWeekDaySchedule::Type request;
    request.userIndex = user_index;
    request.weekDayIndex = week_day_index;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, success_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's ClearWeekDaySchedule command for the given
/// schedule/user indices.
esp_err_t send_clear_week_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id,
                                       uint8_t week_day_index, uint16_t user_index)
{
    DoorLock::Commands::ClearWeekDaySchedule::Type request;
    request.userIndex = user_index;
    request.weekDayIndex = week_day_index;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's SetYearDaySchedule command with the given
/// local start/end times for the user's schedule slot.
esp_err_t send_set_year_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t year_day_index,
                                     uint16_t user_index, uint32_t local_start_time, uint32_t local_end_time)
{
    DoorLock::Commands::SetYearDaySchedule::Type request;
    request.userIndex = user_index;
    request.yearDayIndex = year_day_index;
    // Times are forwarded verbatim; end after start is the caller's responsibility.
    request.localStartTime = local_start_time;
    request.localEndTime = local_end_time;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's GetYearDaySchedule command; the response is
/// delivered to `success_cb`.
esp_err_t send_get_year_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t year_day_index,
                                     uint16_t user_index, get_year_day_schedule_callback success_cb)
{
    DoorLock::Commands::GetYearDaySchedule::Type request;
    request.userIndex = user_index;
    request.yearDayIndex = year_day_index;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, success_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's ClearYearDaySchedule command for the given
/// schedule/user indices.
esp_err_t send_clear_year_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id,
                                       uint8_t year_day_index, uint16_t user_index)
{
    DoorLock::Commands::ClearYearDaySchedule::Type request;
    request.userIndex = user_index;
    request.yearDayIndex = year_day_index;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's SetHolidaySchedule command. The raw
/// `operating_mode` byte is forwarded as the OperatingModeEnum without validation.
esp_err_t send_set_holiday_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t holiday_index,
                                    uint32_t local_start_time, uint32_t local_end_time, uint8_t operating_mode)
{
    DoorLock::Commands::SetHolidaySchedule::Type request;
    request.holidayIndex = holiday_index;
    request.operatingMode = DoorLock::OperatingModeEnum(operating_mode);
    // Times are forwarded verbatim; end after start is the caller's responsibility.
    request.localStartTime = local_start_time;
    request.localEndTime = local_end_time;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's GetHolidaySchedule command; the response is
/// delivered to `success_cb`.
esp_err_t send_get_holiday_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t holiday_index,
                                    get_holiday_schedule_callback success_cb)
{
    // Only the holiday index is carried in this request.
    DoorLock::Commands::GetHolidaySchedule::Type request;
    request.holidayIndex = holiday_index;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, success_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's ClearHolidaySchedule command for the given index.
esp_err_t send_clear_holiday_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t holiday_index)
{
    // Only the holiday index is carried in this request.
    DoorLock::Commands::ClearHolidaySchedule::Type request;
    request.holidayIndex = holiday_index;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's SetUser command as a timed invoke.
/// `user_name` may be NULL, in which case the userName field is left unset;
/// the raw enum bytes are forwarded without validation.
esp_err_t send_set_user(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t operation_type,
                        uint16_t user_index, char *user_name, uint32_t user_unique_id, uint8_t user_status,
                        uint8_t user_type, uint8_t credential_rule, uint16_t timed_invoke_timeout_ms)
{
    DoorLock::Commands::SetUser::Type request;
    request.userIndex = user_index;
    request.operationType = DoorLock::DataOperationTypeEnum(operation_type);
    // userName stays untouched (default) when no name pointer is supplied.
    if (user_name != nullptr) {
        request.userName = chip::app::DataModel::MakeNullable(chip::CharSpan(user_name, strlen(user_name)));
    }
    request.userUniqueID = chip::app::DataModel::MakeNullable(user_unique_id);
    request.userStatus = chip::app::DataModel::MakeNullable(DoorLock::UserStatusEnum(user_status));
    request.userType = chip::app::DataModel::MakeNullable(DoorLock::UserTypeEnum(user_type));
    request.credentialRule = chip::app::DataModel::MakeNullable(DoorLock::CredentialRuleEnum(credential_rule));

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    // Timed invoke: the timeout bounds the timed-interaction window.
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback,
                               chip::MakeOptional(timed_invoke_timeout_ms));
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's GetUser command; the response is delivered
/// to `success_cb`.
esp_err_t send_get_user(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t user_index,
                        get_user_callback success_cb)
{
    // Only the user index is carried in this request.
    DoorLock::Commands::GetUser::Type request;
    request.userIndex = user_index;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, success_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's ClearUser command as a timed invoke with the
/// given timeout (milliseconds).
esp_err_t send_clear_user(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t user_index,
                          uint16_t timed_invoke_timeout_ms)
{
    // Only the user index is carried in this request.
    DoorLock::Commands::ClearUser::Type request;
    request.userIndex = user_index;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback,
                               chip::MakeOptional(timed_invoke_timeout_ms));
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's SetCredential command as a timed invoke.
/// `credential_data`/`credential_len` form the raw credential byte span; the
/// response is delivered to `success_cb`.
esp_err_t send_set_credential(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t operation_type,
                              credential_struct credential, uint8_t *credential_data, size_t credential_len,
                              uint16_t user_index, uint8_t user_status, uint8_t user_type,
                              set_credential_callback success_cb, uint16_t timed_invoke_timeout_ms)
{
    DoorLock::Commands::SetCredential::Type request;
    request.credential = credential;
    request.operationType = DoorLock::DataOperationTypeEnum(operation_type);
    // The credential bytes are referenced, not copied; the caller keeps them alive.
    request.credentialData = chip::ByteSpan(credential_data, credential_len);
    request.userIndex = chip::app::DataModel::MakeNullable(user_index);
    request.userStatus = chip::app::DataModel::MakeNullable(DoorLock::UserStatusEnum(user_status));
    request.userType = chip::app::DataModel::MakeNullable(DoorLock::UserTypeEnum(user_type));

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, success_cb, send_command_failure_callback,
                               chip::MakeOptional(timed_invoke_timeout_ms));
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's GetCredentialStatus command; the response is
/// delivered to `success_cb`.
esp_err_t send_get_credential_status(peer_device_t *remote_device, uint16_t remote_endpoint_id,
                                     credential_struct &credential, get_credential_status_callback success_cb)
{
    // The credential identifier is forwarded verbatim.
    DoorLock::Commands::GetCredentialStatus::Type request;
    request.credential = credential;

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, success_cb, send_command_failure_callback);
    return ESP_OK;
}
|
||||
|
||||
/// Invokes the DoorLock cluster's ClearCredential command as a timed invoke
/// with the given timeout (milliseconds).
esp_err_t send_clear_credential(peer_device_t *remote_device, uint16_t remote_endpoint_id,
                                credential_struct &credential, uint16_t timed_invoke_timeout_ms)
{
    // The credential field is nullable in the data model; wrap the argument.
    DoorLock::Commands::ClearCredential::Type request;
    request.credential = chip::app::DataModel::MakeNullable(credential);

    chip::Controller::DoorLockCluster lock_cluster(*remote_device->GetExchangeManager(),
                                                   remote_device->GetSecureSession().Value(), remote_endpoint_id);
    lock_cluster.InvokeCommand(request, nullptr, send_command_success_callback, send_command_failure_callback,
                               chip::MakeOptional(timed_invoke_timeout_ms));
    return ESP_OK;
}
|
||||
|
||||
/**
 * Invokes the DoorLock cluster's UnboltDoor command as a timed invoke.
 *
 * @param pin_code  Optional PIN bytes; when NULL, the PINCode field is left unset.
 * @param pin_code_len  Length of `pin_code` in bytes (ignored when pin_code is NULL).
 * @param timed_invoke_timeout_ms  Timed-interaction timeout for the invoke.
 */
esp_err_t send_unbolt_door(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t *pin_code,
                           size_t pin_code_len, uint16_t timed_invoke_timeout_ms)
{
    DoorLock::Commands::UnboltDoor::Type command_data;
    if (pin_code) {
        // The PIN bytes are referenced, not copied; the caller keeps them alive.
        command_data.PINCode = chip::MakeOptional(chip::ByteSpan(pin_code, pin_code_len));
    }

    chip::Controller::DoorLockCluster cluster(*remote_device->GetExchangeManager(),
                                              remote_device->GetSecureSession().Value(), remote_endpoint_id);
    cluster.InvokeCommand(command_data, NULL, send_command_success_callback, send_command_failure_callback,
                          chip::MakeOptional(timed_invoke_timeout_ms));
    // The unreachable `return ESP_ERR_NOT_SUPPORTED;` that followed this
    // statement was dead code and has been removed.
    return ESP_OK;
}
|
||||
|
||||
} // namespace command
|
||||
} // namespace door_lock
|
||||
|
||||
namespace window_covering {
|
||||
namespace command {
|
||||
|
||||
|
||||
@@ -14,9 +14,9 @@
|
||||
|
||||
#pragma once
|
||||
|
||||
#include <app-common/zap-generated/cluster-objects.h>
|
||||
#include <esp_err.h>
|
||||
#include <esp_matter_core.h>
|
||||
#include <app-common/zap-generated/cluster-objects.h>
|
||||
|
||||
namespace esp_matter {
|
||||
namespace cluster {
|
||||
@@ -154,6 +154,9 @@ namespace command {
|
||||
esp_err_t send_identify(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t identify_time);
|
||||
|
||||
esp_err_t group_send_identify(uint8_t fabric_index, uint16_t group_id, uint16_t identify_time);
|
||||
|
||||
esp_err_t send_trigger_effect(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t effect_identifier,
|
||||
uint8_t effect_variant);
|
||||
} // namespace command
|
||||
} // namespace identify
|
||||
|
||||
@@ -164,7 +167,7 @@ using keyset_read_callback =
|
||||
void (*)(void *, const chip::app::Clusters::GroupKeyManagement::Commands::KeySetRead::Type::ResponseType &);
|
||||
|
||||
esp_err_t send_keyset_write(peer_device_t *remote_device, uint16_t remote_endpoint_id,
|
||||
group_keyset_struct group_keyset);
|
||||
group_keyset_struct &group_keyset);
|
||||
|
||||
esp_err_t send_keyset_read(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t keyset_id,
|
||||
keyset_read_callback read_callback);
|
||||
@@ -193,6 +196,156 @@ esp_err_t send_remove_group(peer_device_t *remote_device, uint16_t remote_endpoi
|
||||
} // namespace command
|
||||
} // namespace groups
|
||||
|
||||
namespace scenes {
|
||||
namespace command {
|
||||
|
||||
using extension_field_sets = chip::app::DataModel::List<chip::app::Clusters::Scenes::Structs::ExtensionFieldSet::Type>;
|
||||
using add_scene_callback = void (*)(void *,
|
||||
const chip::app::Clusters::Scenes::Commands::AddScene::Type::ResponseType &);
|
||||
using view_scene_callback = void (*)(void *,
|
||||
const chip::app::Clusters::Scenes::Commands::ViewScene::Type::ResponseType &);
|
||||
using remove_scene_callback = void (*)(void *,
|
||||
const chip::app::Clusters::Scenes::Commands::RemoveScene::Type::ResponseType &);
|
||||
using remove_all_scenes_callback =
|
||||
void (*)(void *, const chip::app::Clusters::Scenes::Commands::RemoveAllScenes::Type::ResponseType &);
|
||||
using store_scene_callback = void (*)(void *,
|
||||
const chip::app::Clusters::Scenes::Commands::StoreScene::Type::ResponseType &);
|
||||
using get_scene_membership_callback =
|
||||
void (*)(void *, const chip::app::Clusters::Scenes::Commands::GetSceneMembership::Type::ResponseType &);
|
||||
|
||||
esp_err_t send_add_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id, uint8_t scene_id,
|
||||
uint16_t transition_time, char *scene_name, extension_field_sets &efs,
|
||||
add_scene_callback add_scene_cb);
|
||||
|
||||
esp_err_t send_view_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
|
||||
uint8_t scene_id, view_scene_callback view_scene_cb);
|
||||
|
||||
esp_err_t send_remove_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
|
||||
uint8_t scene_id, remove_scene_callback remove_scene_cb);
|
||||
|
||||
esp_err_t send_remove_all_scenes(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
|
||||
remove_all_scenes_callback remove_all_scenes_cb);
|
||||
|
||||
esp_err_t send_store_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
|
||||
uint8_t scene_id, store_scene_callback store_scene_cb);
|
||||
|
||||
esp_err_t send_recall_scene(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
|
||||
uint8_t scene_id);
|
||||
|
||||
esp_err_t send_get_scene_membership(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t group_id,
|
||||
get_scene_membership_callback get_scene_membership_cb);
|
||||
|
||||
} // namespace command
|
||||
} // namespace scenes
|
||||
|
||||
namespace thermostat {
|
||||
namespace command {
|
||||
|
||||
using transitions =
|
||||
chip::app::DataModel::List<chip::app::Clusters::Thermostat::Structs::ThermostatScheduleTransition::Type>;
|
||||
|
||||
using get_weekly_schedule_callback =
|
||||
void (*)(void *, const chip::app::Clusters::Thermostat::Commands::GetWeeklySchedule::Type::ResponseType &);
|
||||
|
||||
esp_err_t send_setpoint_raise_lower(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t mode,
|
||||
uint8_t amount);
|
||||
|
||||
esp_err_t send_set_weekly_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id,
|
||||
uint8_t num_of_tras_for_seq, uint8_t day_of_week_for_seq, uint8_t mode_for_seq,
|
||||
transitions &trans);
|
||||
|
||||
esp_err_t send_get_weekly_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t day_to_return,
|
||||
uint8_t mode_to_return, get_weekly_schedule_callback get_weekly_schedule_cb);
|
||||
|
||||
esp_err_t send_clear_weekly_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id);
|
||||
|
||||
} // namespace command
|
||||
} // namespace thermostat
|
||||
|
||||
namespace door_lock {
|
||||
namespace command {
|
||||
|
||||
using get_week_day_schedule_callback =
|
||||
void (*)(void *, const chip::app::Clusters::DoorLock::Commands::GetWeekDaySchedule::Type::ResponseType &);
|
||||
|
||||
using get_year_day_schedule_callback =
|
||||
void (*)(void *, const chip::app::Clusters::DoorLock::Commands::GetYearDaySchedule::Type::ResponseType &);
|
||||
|
||||
using get_holiday_schedule_callback =
|
||||
void (*)(void *, const chip::app::Clusters::DoorLock::Commands::GetHolidaySchedule::Type::ResponseType &);
|
||||
|
||||
using get_user_callback = void (*)(void *,
|
||||
const chip::app::Clusters::DoorLock::Commands::GetUser::Type::ResponseType &);
|
||||
|
||||
using set_credential_callback =
|
||||
void (*)(void *, const chip::app::Clusters::DoorLock::Commands::SetCredential::Type::ResponseType &);
|
||||
|
||||
using get_credential_status_callback =
|
||||
void (*)(void *, const chip::app::Clusters::DoorLock::Commands::GetCredentialStatus::Type::ResponseType &);
|
||||
|
||||
using credential_struct = chip::app::Clusters::DoorLock::Structs::CredentialStruct::Type;
|
||||
|
||||
esp_err_t send_lock_door(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t timed_invoke_timeout_ms);
|
||||
|
||||
esp_err_t send_unlock_door(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t timed_invoke_timeout_ms);
|
||||
|
||||
esp_err_t send_unlock_with_timeout(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t timeout,
|
||||
uint16_t timed_invoke_timeout_ms);
|
||||
|
||||
esp_err_t send_set_week_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t week_day_index,
|
||||
uint16_t user_index, uint8_t days_mask, uint8_t start_hour, uint8_t start_minute,
|
||||
uint8_t end_hour, uint8_t end_minute);
|
||||
|
||||
esp_err_t send_get_week_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t week_day_index,
|
||||
uint16_t user_index, get_week_day_schedule_callback success_cb);
|
||||
|
||||
esp_err_t send_clear_week_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id,
|
||||
uint8_t week_day_index, uint16_t user_index);
|
||||
|
||||
esp_err_t send_set_year_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t year_day_index,
|
||||
uint16_t user_index, uint32_t local_start_time, uint32_t local_end_time);
|
||||
|
||||
esp_err_t send_get_year_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t year_day_index,
|
||||
uint16_t user_index, get_year_day_schedule_callback success_cb);
|
||||
|
||||
esp_err_t send_clear_year_day_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id,
|
||||
uint8_t year_day_index, uint16_t user_index);
|
||||
|
||||
esp_err_t send_set_holiday_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t holiday_index,
|
||||
uint32_t local_start_time, uint32_t local_end_time, uint8_t operating_mode);
|
||||
|
||||
esp_err_t send_get_holiday_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t holiday_index,
|
||||
get_holiday_schedule_callback success_cb);
|
||||
|
||||
esp_err_t send_clear_holiday_schedule(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t holiday_index);
|
||||
|
||||
esp_err_t send_set_user(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t operation_type,
|
||||
uint16_t user_index, char *user_name, uint32_t user_unique_id, uint8_t user_status,
|
||||
uint8_t user_type, uint8_t credential_rule, uint16_t timed_invoke_timeout_ms);
|
||||
|
||||
esp_err_t send_get_user(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t user_index,
|
||||
get_user_callback success_cb);
|
||||
|
||||
esp_err_t send_clear_user(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint16_t user_index,
|
||||
uint16_t timed_invoke_timeout_ms);
|
||||
|
||||
esp_err_t send_set_credential(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t operation_type,
|
||||
credential_struct credential, uint8_t *credential_data, size_t credential_len,
|
||||
uint16_t user_index, uint8_t user_status, uint8_t user_type,
|
||||
set_credential_callback success_cb, uint16_t timed_invoke_timeout_ms);
|
||||
|
||||
esp_err_t send_get_credential_status(peer_device_t *remote_device, uint16_t remote_endpoint_id,
|
||||
credential_struct &credential, get_credential_status_callback success_cb);
|
||||
|
||||
esp_err_t send_clear_credential(peer_device_t *remote_device, uint16_t remote_endpoint_id,
|
||||
credential_struct &credential, uint16_t timed_invoke_timeout_ms);
|
||||
|
||||
esp_err_t send_unbolt_door(peer_device_t *remote_device, uint16_t remote_endpoint_id, uint8_t *pin_code,
|
||||
size_t pin_code_len, uint16_t timed_invoke_timeout_ms);
|
||||
|
||||
} // namespace command
|
||||
} // namespace door_lock
|
||||
|
||||
namespace window_covering {
|
||||
namespace command {
|
||||
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
#else
|
||||
#include <app/server/Server.h>
|
||||
#endif
|
||||
#include <esp_check.h>
|
||||
#include <esp_matter_controller_cluster_command.h>
|
||||
#include <esp_matter_controller_utils.h>
|
||||
#include <esp_matter_mem.h>
|
||||
@@ -87,7 +88,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
switch (command_data->command_id) {
|
||||
case LevelControl::Commands::Move::Id:
|
||||
if (command_data->command_data_count != 4) {
|
||||
ESP_LOGE(TAG, "The command date should in following order: move_mode, rate, option_mask, option_override");
|
||||
ESP_LOGE(TAG, "The command data should in following order: move_mode, rate, option_mask, option_override");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::level_control::command::send_move(
|
||||
@@ -101,7 +102,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
if (command_data->command_data_count != 4) {
|
||||
ESP_LOGE(
|
||||
TAG,
|
||||
"The command date should in following order: level, transition_time, option_mask, option_override");
|
||||
"The command data should in following order: level, transition_time, option_mask, option_override");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::level_control::command::send_move_to_level(
|
||||
@@ -114,7 +115,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
case LevelControl::Commands::Step::Id:
|
||||
if (command_data->command_data_count != 5) {
|
||||
ESP_LOGE(TAG,
|
||||
"The command date should in following order: step_mode, step_size, transition_time, option_mask, "
|
||||
"The command data should in following order: step_mode, step_size, transition_time, option_mask, "
|
||||
"option_override");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
@@ -128,7 +129,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
break;
|
||||
case LevelControl::Commands::Stop::Id:
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command date should in following order: option_mask, option_override");
|
||||
ESP_LOGE(TAG, "The command data should in following order: option_mask, option_override");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::level_control::command::send_stop(
|
||||
@@ -153,7 +154,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
case ColorControl::Commands::MoveToHue::Id:
|
||||
if (command_data->command_data_count != 5) {
|
||||
ESP_LOGE(TAG,
|
||||
"The command date should in following order: hue, direction, transition_time, option_mask, "
|
||||
"The command data should in following order: hue, direction, transition_time, option_mask, "
|
||||
"option_override");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
@@ -168,7 +169,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
case ColorControl::Commands::MoveToSaturation::Id:
|
||||
if (command_data->command_data_count != 4) {
|
||||
ESP_LOGE(TAG,
|
||||
"The command date should in following order: saturation, transition_time, option_mask, "
|
||||
"The command data should in following order: saturation, transition_time, option_mask, "
|
||||
"option_override");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
@@ -182,7 +183,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
case ColorControl::Commands::MoveToHueAndSaturation::Id:
|
||||
if (command_data->command_data_count != 5) {
|
||||
ESP_LOGE(TAG,
|
||||
"The command date should in following order: hue, saturation, transition_time, option_mask, "
|
||||
"The command data should in following order: hue, saturation, transition_time, option_mask, "
|
||||
"option_override");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
@@ -278,7 +279,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
switch (command_data->command_id) {
|
||||
case GroupKeyManagement::Commands::KeySetWrite::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command date should in following order: group_keyset");
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_keyset");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
group_keyset_struct keyset_struct;
|
||||
@@ -293,7 +294,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
}
|
||||
case GroupKeyManagement::Commands::KeySetRead::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command date should in following order: group_keyset_id");
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_keyset_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::group_key_management::command::send_keyset_read(
|
||||
@@ -332,7 +333,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
switch (command_data->command_id) {
|
||||
case Groups::Commands::AddGroup::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command date should in following order: group_id group_name");
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id group_name");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::groups::command::send_add_group(
|
||||
@@ -343,7 +344,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
}
|
||||
case Groups::Commands::ViewGroup::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command date should in following order: group_id");
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::groups::command::send_view_group(
|
||||
@@ -353,7 +354,7 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
}
|
||||
case Groups::Commands::RemoveGroup::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command date should in following order: group_id");
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::groups::command::send_remove_group(
|
||||
@@ -369,6 +370,374 @@ static esp_err_t send_command(command_data_t *command_data, peer_device_t *remot
|
||||
|
||||
} // namespace groups
|
||||
|
||||
namespace identify {
|
||||
|
||||
static esp_err_t send_command(command_data_t *command_data, peer_device_t *remote_device, uint16_t remote_endpoint_id)
|
||||
{
|
||||
switch (command_data->command_id) {
|
||||
case Identify::Commands::Identify::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: identify_time");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::identify::command::send_identify(
|
||||
remote_device, remote_endpoint_id, /* identify_time */ string_to_uint16(command_data->command_data_str[0]));
|
||||
break;
|
||||
}
|
||||
case Identify::Commands::TriggerEffect::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: effect_identifier effect_variant");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::identify::command::send_trigger_effect(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* effect_identifier */ string_to_uint8(command_data->command_data_str[0]),
|
||||
/* effect_variant */ string_to_uint8(command_data->command_data_str[1]));
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return ESP_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
} // namespace identify
|
||||
|
||||
namespace scenes {
|
||||
|
||||
static void add_scene_success_callback(void *ctx, const Scenes::Commands::AddScene::Type::ResponseType &response)
|
||||
{
|
||||
DataModelLogger::LogValue("addSceneResponse", 1, response);
|
||||
}
|
||||
|
||||
static void view_scene_success_callback(void *ctx, const Scenes::Commands::ViewScene::Type::ResponseType &response)
|
||||
{
|
||||
DataModelLogger::LogValue("viewSceneResponse", 1, response);
|
||||
}
|
||||
|
||||
static void remove_scene_success_callback(void *ctx, const Scenes::Commands::RemoveScene::Type::ResponseType &response)
|
||||
{
|
||||
DataModelLogger::LogValue("removeSceneResponse", 1, response);
|
||||
}
|
||||
|
||||
static void remove_all_scenes_success_callback(void *ctx,
|
||||
const Scenes::Commands::RemoveAllScenes::Type::ResponseType &response)
|
||||
{
|
||||
DataModelLogger::LogValue("removeAllScenesResponse", 1, response);
|
||||
}
|
||||
|
||||
static void store_scene_success_callback(void *ctx, const Scenes::Commands::StoreScene::Type::ResponseType &response)
|
||||
{
|
||||
DataModelLogger::LogValue("storeSceneResponse", 1, response);
|
||||
}
|
||||
|
||||
static void get_scene_membership_success_callback(
|
||||
void *ctx, const Scenes::Commands::GetSceneMembership::Type::ResponseType &response)
|
||||
{
|
||||
DataModelLogger::LogValue("getSceneMembershipResponse", 1, response);
|
||||
}
|
||||
|
||||
using extension_field_set = chip::app::Clusters::Scenes::Structs::ExtensionFieldSet::Type;
|
||||
using attribute_value_pair = chip::app::Clusters::Scenes::Structs::AttributeValuePair::Type;
|
||||
static constexpr uint8_t max_cluster_per_scene = CHIP_CONFIG_SCENES_MAX_CLUSTERS_PER_SCENE;
|
||||
static constexpr uint8_t max_attr_per_cluster = CHIP_CONFIG_SCENES_MAX_EXTENSION_FIELDSET_SIZE_PER_CLUSTER / 10;
|
||||
|
||||
static bool parse_extension_field_sets(char *json_str, extension_field_set efs_array[],
|
||||
attribute_value_pair avp_arrays[][max_attr_per_cluster], size_t &efs_size)
|
||||
{
|
||||
jparse_ctx_t jctx;
|
||||
if (json_parse_start(&jctx, json_str, strlen(json_str)) != 0) {
|
||||
return false;
|
||||
}
|
||||
size_t index = 0;
|
||||
while (index < max_cluster_per_scene && json_arr_get_object(&jctx, index) == 0) {
|
||||
int int_val;
|
||||
int64_t int64_val;
|
||||
int attr_val_list_size = 0;
|
||||
if (json_obj_get_int(&jctx, "clusterID", &int_val) != 0) {
|
||||
json_arr_leave_object(&jctx);
|
||||
json_parse_end(&jctx);
|
||||
return false;
|
||||
}
|
||||
efs_array[index].clusterID = int_val;
|
||||
if (json_obj_get_array(&jctx, "attributeValueList", &attr_val_list_size) == 0) {
|
||||
for (size_t attr_index = 0; attr_index < attr_val_list_size; ++attr_index) {
|
||||
if (json_arr_get_object(&jctx, attr_index) == 0) {
|
||||
if (json_obj_get_int(&jctx, "attributeID", &int_val) == 0) {
|
||||
avp_arrays[index][attr_index].attributeID = int_val;
|
||||
}
|
||||
if (json_obj_get_int64(&jctx, "attributeValue", &int64_val) == 0) {
|
||||
avp_arrays[index][attr_index].attributeValue = (uint32_t)int64_val;
|
||||
}
|
||||
json_arr_leave_object(&jctx);
|
||||
}
|
||||
}
|
||||
efs_array[index].attributeValueList =
|
||||
chip::app::DataModel::List<attribute_value_pair>(avp_arrays[index], attr_val_list_size);
|
||||
json_obj_leave_array(&jctx);
|
||||
} else {
|
||||
json_arr_leave_object(&jctx);
|
||||
json_parse_end(&jctx);
|
||||
return false;
|
||||
}
|
||||
json_arr_leave_object(&jctx);
|
||||
index++;
|
||||
}
|
||||
efs_size = index;
|
||||
json_parse_end(&jctx);
|
||||
return true;
|
||||
}
|
||||
|
||||
static esp_err_t send_command(command_data_t *command_data, peer_device_t *remote_device, uint16_t remote_endpoint_id)
|
||||
{
|
||||
switch (command_data->command_id) {
|
||||
case Scenes::Commands::AddScene::Id: {
|
||||
if (command_data->command_data_count != 5) {
|
||||
ESP_LOGE(TAG,
|
||||
"The command data should in following order: group_id scene_id transition_time scene_name "
|
||||
"extension_field_sets");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
extension_field_set efs_array[max_cluster_per_scene];
|
||||
attribute_value_pair avp_arrays[max_cluster_per_scene][max_attr_per_cluster];
|
||||
size_t efs_size = 0;
|
||||
if (!parse_extension_field_sets(command_data->command_data_str[4], efs_array, avp_arrays, efs_size)) {
|
||||
ESP_LOGE(TAG, "Failed to Parse extension_field_sets");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
cluster::scenes::command::extension_field_sets efs(efs_array, efs_size);
|
||||
|
||||
return esp_matter::cluster::scenes::command::send_add_scene(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* group_id */ string_to_uint16(command_data->command_data_str[0]),
|
||||
/* scene_id */ string_to_uint8(command_data->command_data_str[1]),
|
||||
/* transition_time */ string_to_uint16(command_data->command_data_str[2]),
|
||||
/* scene_name */ command_data->command_data_str[3], efs, add_scene_success_callback);
|
||||
break;
|
||||
}
|
||||
case Scenes::Commands::ViewScene::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id scene_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::scenes::command::send_view_scene(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* group_id */ string_to_uint16(command_data->command_data_str[0]),
|
||||
/* scene_id */ string_to_uint8(command_data->command_data_str[1]), view_scene_success_callback);
|
||||
break;
|
||||
}
|
||||
case Scenes::Commands::RemoveScene::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id scene_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::scenes::command::send_remove_scene(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* group_id */ string_to_uint16(command_data->command_data_str[0]),
|
||||
/* scene_id */ string_to_uint8(command_data->command_data_str[1]), remove_scene_success_callback);
|
||||
break;
|
||||
}
|
||||
case Scenes::Commands::RemoveAllScenes::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::scenes::command::send_remove_all_scenes(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* group_id */ string_to_uint16(command_data->command_data_str[0]), remove_all_scenes_success_callback);
|
||||
break;
|
||||
}
|
||||
case Scenes::Commands::StoreScene::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id scene_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::scenes::command::send_store_scene(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* group_id */ string_to_uint16(command_data->command_data_str[0]),
|
||||
/* scene_id */ string_to_uint8(command_data->command_data_str[1]), store_scene_success_callback);
|
||||
break;
|
||||
}
|
||||
case Scenes::Commands::RecallScene::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id scene_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::scenes::command::send_recall_scene(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* group_id */ string_to_uint16(command_data->command_data_str[0]),
|
||||
/* scene_id */ string_to_uint8(command_data->command_data_str[1]));
|
||||
break;
|
||||
}
|
||||
case Scenes::Commands::GetSceneMembership::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: group_id");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::scenes::command::send_get_scene_membership(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* group_id */ string_to_uint16(command_data->command_data_str[0]), get_scene_membership_success_callback);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return ESP_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
} // namespace scenes
|
||||
|
||||
namespace thermostat {
|
||||
|
||||
static void get_weekly_schedule_success_callback(
|
||||
void *ctx, const Thermostat::Commands::GetWeeklySchedule::Type::ResponseType &response)
|
||||
{
|
||||
DataModelLogger::LogValue("getWeeklyScheduleResponse", 1, response);
|
||||
}
|
||||
|
||||
static constexpr uint8_t max_transitions_len = 10;
|
||||
using schedule_transition = chip::app::Clusters::Thermostat::Structs::ThermostatScheduleTransition::Type;
|
||||
|
||||
static bool parse_schedule_transition_list(char *json_str, schedule_transition *transition_array,
|
||||
size_t &transitions_len)
|
||||
{
|
||||
jparse_ctx_t jctx;
|
||||
if (json_parse_start(&jctx, json_str, strlen(json_str)) != 0) {
|
||||
return false;
|
||||
}
|
||||
size_t index = 0;
|
||||
while (index < max_transitions_len && json_arr_get_object(&jctx, index) == 0) {
|
||||
int int_val;
|
||||
if (json_obj_get_int(&jctx, "transitionTime", &int_val) != 0) {
|
||||
json_arr_leave_object(&jctx);
|
||||
json_parse_end(&jctx);
|
||||
return false;
|
||||
}
|
||||
transition_array[index].transitionTime = int_val;
|
||||
|
||||
if (json_obj_get_int(&jctx, "coolSetpoint", &int_val) == 0) {
|
||||
transition_array[index].coolSetpoint.SetNonNull((int16_t)int_val);
|
||||
} else {
|
||||
transition_array[index].coolSetpoint.SetNull();
|
||||
}
|
||||
|
||||
if (json_obj_get_int(&jctx, "heatSetpoint", &int_val) == 0) {
|
||||
transition_array[index].heatSetpoint.SetNonNull((int16_t)int_val);
|
||||
} else {
|
||||
transition_array[index].heatSetpoint.SetNull();
|
||||
}
|
||||
|
||||
json_arr_leave_object(&jctx);
|
||||
index++;
|
||||
}
|
||||
transitions_len = index;
|
||||
json_parse_end(&jctx);
|
||||
return true;
|
||||
}
|
||||
|
||||
static esp_err_t send_command(command_data_t *command_data, peer_device_t *remote_device, uint16_t remote_endpoint_id)
|
||||
{
|
||||
switch (command_data->command_id) {
|
||||
case Thermostat::Commands::SetpointRaiseLower::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: mode amount");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::thermostat::command::send_setpoint_raise_lower(
|
||||
remote_device, remote_endpoint_id, /* mode */ string_to_uint8(command_data->command_data_str[0]),
|
||||
/* amount */ string_to_uint8(command_data->command_data_str[1]));
|
||||
break;
|
||||
}
|
||||
case Thermostat::Commands::SetWeeklySchedule::Id: {
|
||||
if (command_data->command_data_count != 4) {
|
||||
ESP_LOGE(TAG,
|
||||
"The command data should in following order: number_of_transitions_for_sequence "
|
||||
"day_of_week_for_sequence mode_for_sequence transitions");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
schedule_transition transition_array[max_transitions_len];
|
||||
size_t transitions_len = 0;
|
||||
if (!parse_schedule_transition_list(command_data->command_data_str[3], transition_array, transitions_len)) {
|
||||
ESP_LOGE(TAG, "Failed to parse schedule_transition_list");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
cluster::thermostat::command::transitions trans(transition_array, transitions_len);
|
||||
return esp_matter::cluster::thermostat::command::send_set_weekly_schedule(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* number_of_transitions_for_sequence */ string_to_uint8(command_data->command_data_str[0]),
|
||||
/* day_of_week_for_sequence */ string_to_uint8(command_data->command_data_str[1]),
|
||||
/* mode_for_sequence */ string_to_uint8(command_data->command_data_str[2]),
|
||||
/* transitions */ trans);
|
||||
break;
|
||||
}
|
||||
case Thermostat::Commands::GetWeeklySchedule::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: data_to_return mode_to_return");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::thermostat::command::send_get_weekly_schedule(
|
||||
remote_device, remote_endpoint_id, /* data_to_return */ string_to_uint8(command_data->command_data_str[0]),
|
||||
/* mode_to_return */ string_to_uint8(command_data->command_data_str[1]),
|
||||
get_weekly_schedule_success_callback);
|
||||
|
||||
break;
|
||||
}
|
||||
case Thermostat::Commands::ClearWeeklySchedule::Id: {
|
||||
return esp_matter::cluster::thermostat::command::send_clear_weekly_schedule(remote_device, remote_endpoint_id);
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return ESP_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
} // namespace thermostat
|
||||
|
||||
namespace door_lock {
|
||||
|
||||
static esp_err_t send_command(command_data_t *command_data, peer_device_t *remote_device, uint16_t remote_endpoint_id)
|
||||
{
|
||||
switch (command_data->command_id) {
|
||||
case DoorLock::Commands::LockDoor::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: timed_invoke_timeout_ms");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::door_lock::command::send_lock_door(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* timed_invoke_timeout_ms */ string_to_uint16(command_data->command_data_str[0]));
|
||||
break;
|
||||
}
|
||||
case DoorLock::Commands::UnlockDoor::Id: {
|
||||
if (command_data->command_data_count != 1) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: timed_invoke_timeout_ms");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::door_lock::command::send_unlock_door(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* timed_invoke_timeout_ms */ string_to_uint16(command_data->command_data_str[0]));
|
||||
break;
|
||||
}
|
||||
case DoorLock::Commands::UnlockWithTimeout::Id: {
|
||||
if (command_data->command_data_count != 2) {
|
||||
ESP_LOGE(TAG, "The command data should in following order: timeout timed_invoke_timeout_ms");
|
||||
return ESP_ERR_INVALID_ARG;
|
||||
}
|
||||
return esp_matter::cluster::door_lock::command::send_unlock_with_timeout(
|
||||
remote_device, remote_endpoint_id,
|
||||
/* timeout */ string_to_uint16(command_data->command_data_str[0]),
|
||||
/* timed_invoke_timeout_ms */ string_to_uint16(command_data->command_data_str[1]));
|
||||
break;
|
||||
}
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return ESP_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
} // namespace door_lock
|
||||
|
||||
namespace window_covering {
|
||||
|
||||
static esp_err_t send_command(command_data_t *command_data, peer_device_t *remote_device, uint16_t remote_endpoint_id)
|
||||
@@ -458,7 +827,19 @@ void cluster_command::on_device_connected_fcn(void *context, ExchangeManager &ex
|
||||
err = clusters::groups::send_command(cmd->m_command_data, &device_proxy, cmd->m_endpoint_id);
|
||||
break;
|
||||
case WindowCovering::Id:
|
||||
clusters::window_covering::send_command(cmd->m_command_data, &device_proxy, cmd->m_endpoint_id);
|
||||
err = clusters::window_covering::send_command(cmd->m_command_data, &device_proxy, cmd->m_endpoint_id);
|
||||
break;
|
||||
case Identify::Id:
|
||||
err = clusters::identify::send_command(cmd->m_command_data, &device_proxy, cmd->m_endpoint_id);
|
||||
break;
|
||||
case Scenes::Id:
|
||||
err = clusters::scenes::send_command(cmd->m_command_data, &device_proxy, cmd->m_endpoint_id);
|
||||
break;
|
||||
case Thermostat::Id:
|
||||
err = clusters::thermostat::send_command(cmd->m_command_data, &device_proxy, cmd->m_endpoint_id);
|
||||
break;
|
||||
case DoorLock::Id:
|
||||
err = clusters::door_lock::send_command(cmd->m_command_data, &device_proxy, cmd->m_endpoint_id);
|
||||
break;
|
||||
default:
|
||||
err = ESP_ERR_NOT_SUPPORTED;
|
||||
|
||||
@@ -91,3 +91,34 @@ uint8_t string_to_uint8(char *str)
|
||||
{
|
||||
return (uint8_t)string_to_uint32(str);
|
||||
}
|
||||
|
||||
int64_t string_to_int64(char *str)
|
||||
{
|
||||
return strtoll(str, NULL, 10);
|
||||
}
|
||||
|
||||
int32_t string_to_int32(char *str)
|
||||
{
|
||||
return strtol(str, NULL, 10);
|
||||
}
|
||||
|
||||
int16_t string_to_int16(char *str)
|
||||
{
|
||||
return (int16_t)string_to_int32(str);
|
||||
}
|
||||
|
||||
int8_t string_to_int8(char *str)
|
||||
{
|
||||
return (int8_t)string_to_int32(str);
|
||||
}
|
||||
|
||||
bool string_to_bool(char *str)
|
||||
{
|
||||
if (strcmp(str, "true") == 0) {
|
||||
return true;
|
||||
} else if (strcmp(str, "false") == 0) {
|
||||
return false;
|
||||
} else {
|
||||
return string_to_uint8(str);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,3 +57,13 @@ uint32_t string_to_uint32(char *str);
|
||||
uint16_t string_to_uint16(char *str);
|
||||
|
||||
uint8_t string_to_uint8(char *str);
|
||||
|
||||
int64_t string_to_int64(char *str);
|
||||
|
||||
int32_t string_to_int32(char *str);
|
||||
|
||||
int16_t string_to_int16(char *str);
|
||||
|
||||
int8_t string_to_int8(char *str);
|
||||
|
||||
bool string_to_bool(char *str);
|
||||
|
||||
@@ -535,6 +535,180 @@ static esp_err_t write_attribute(uint64_t node_id, uint16_t endpoint_id, uint32_
|
||||
|
||||
} // namespace group_key_management
|
||||
|
||||
namespace identify {
|
||||
|
||||
static esp_err_t write_attribute(uint64_t node_id, uint16_t endpoint_id, uint32_t attribute_id, char *attribute_val_str)
|
||||
{
|
||||
if (attribute_id == Identify::Attributes::IdentifyTime::Id) {
|
||||
write_command<uint16_t> *cmd = New<write_command<uint16_t>>(node_id, endpoint_id, Identify::Id, attribute_id,
|
||||
string_to_uint16(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
return cmd->send_command();
|
||||
}
|
||||
return ESP_ERR_NOT_SUPPORTED;
|
||||
}
|
||||
|
||||
} // namespace identify
|
||||
|
||||
namespace thermostat {
|
||||
|
||||
static esp_err_t write_attribute(uint64_t node_id, uint16_t endpoint_id, uint32_t attribute_id, char *attribute_val_str)
|
||||
{
|
||||
esp_err_t err = ESP_OK;
|
||||
switch (attribute_id) {
|
||||
// uint8 value type
|
||||
case Thermostat::Attributes::HVACSystemTypeConfiguration::Id:
|
||||
case Thermostat::Attributes::RemoteSensing::Id:
|
||||
case Thermostat::Attributes::ControlSequenceOfOperation::Id:
|
||||
case Thermostat::Attributes::SystemMode::Id:
|
||||
case Thermostat::Attributes::TemperatureSetpointHold::Id:
|
||||
case Thermostat::Attributes::ThermostatProgrammingOperationMode::Id:
|
||||
case Thermostat::Attributes::OccupiedSetback::Id:
|
||||
case Thermostat::Attributes::UnoccupiedSetback::Id:
|
||||
case Thermostat::Attributes::EmergencyHeatDelta::Id:
|
||||
case Thermostat::Attributes::ACType::Id:
|
||||
case Thermostat::Attributes::ACRefrigerantType::Id:
|
||||
case Thermostat::Attributes::ACCompressorType::Id:
|
||||
case Thermostat::Attributes::ACLouverPosition::Id:
|
||||
case Thermostat::Attributes::ACCapacityformat::Id: {
|
||||
write_command<uint8_t> *cmd = New<write_command<uint8_t>>(node_id, endpoint_id, Thermostat::Id, attribute_id,
|
||||
string_to_uint8(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
// int8 value type
|
||||
case Thermostat::Attributes::LocalTemperatureCalibration::Id:
|
||||
case Thermostat::Attributes::MinSetpointDeadBand::Id: {
|
||||
write_command<int8_t> *cmd = New<write_command<int8_t>>(node_id, endpoint_id, Thermostat::Id, attribute_id,
|
||||
string_to_int8(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
// int16 value type
|
||||
case Thermostat::Attributes::OccupiedCoolingSetpoint::Id:
|
||||
case Thermostat::Attributes::OccupiedHeatingSetpoint::Id:
|
||||
case Thermostat::Attributes::UnoccupiedCoolingSetpoint::Id:
|
||||
case Thermostat::Attributes::UnoccupiedHeatingSetpoint::Id:
|
||||
case Thermostat::Attributes::MinHeatSetpointLimit::Id:
|
||||
case Thermostat::Attributes::MaxHeatSetpointLimit::Id:
|
||||
case Thermostat::Attributes::MinCoolSetpointLimit::Id:
|
||||
case Thermostat::Attributes::MaxCoolSetpointLimit::Id: {
|
||||
write_command<int16_t> *cmd = New<write_command<int16_t>>(node_id, endpoint_id, Thermostat::Id, attribute_id,
|
||||
string_to_int16(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
// uint16 value type
|
||||
case Thermostat::Attributes::TemperatureSetpointHoldDuration::Id:
|
||||
case Thermostat::Attributes::ACCapacity::Id: {
|
||||
write_command<uint16_t> *cmd = New<write_command<uint16_t>>(node_id, endpoint_id, Thermostat::Id, attribute_id,
|
||||
string_to_uint16(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
// uint32 value type
|
||||
case Thermostat::Attributes::ACErrorCode::Id: {
|
||||
write_command<uint32_t> *cmd = New<write_command<uint32_t>>(node_id, endpoint_id, Thermostat::Id, attribute_id,
|
||||
string_to_uint32(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
default:
|
||||
err = ESP_ERR_NOT_SUPPORTED;
|
||||
break;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
|
||||
} // namespace thermostat
|
||||
|
||||
namespace door_lock {
|
||||
|
||||
constexpr size_t k_max_language_str_len = 3;
|
||||
|
||||
static void language_str_free(void *ctx)
|
||||
{
|
||||
chip::Platform::MemoryFree(ctx);
|
||||
}
|
||||
|
||||
static esp_err_t write_attribute(uint64_t node_id, uint16_t endpoint_id, uint32_t attribute_id, char *attribute_val_str)
|
||||
{
|
||||
esp_err_t err = ESP_OK;
|
||||
switch (attribute_id) {
|
||||
// uint32 value type
|
||||
case DoorLock::Attributes::DoorOpenEvents::Id:
|
||||
case DoorLock::Attributes::DoorClosedEvents::Id:
|
||||
case DoorLock::Attributes::AutoRelockTime::Id: {
|
||||
write_command<uint32_t> *cmd = New<write_command<uint32_t>>(node_id, endpoint_id, DoorLock::Id, attribute_id,
|
||||
string_to_uint32(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
// uint16 value type
|
||||
case DoorLock::Attributes::OpenPeriod::Id:
|
||||
case DoorLock::Attributes::ExpiringUserTimeout::Id: {
|
||||
write_command<uint16_t> *cmd = New<write_command<uint16_t>>(node_id, endpoint_id, DoorLock::Id, attribute_id,
|
||||
string_to_uint16(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
// string value type
|
||||
case DoorLock::Attributes::Language::Id: {
|
||||
char *language_buf = static_cast<char *>(chip::Platform::MemoryAlloc(k_max_language_str_len));
|
||||
if (!language_buf) {
|
||||
ESP_LOGE(TAG, "Failed to alloc memory for language_buf");
|
||||
return ESP_ERR_NO_MEM;
|
||||
}
|
||||
strncpy(language_buf, attribute_val_str, strnlen(attribute_val_str, k_max_language_str_len - 1));
|
||||
language_buf[k_max_language_str_len - 1] = 0;
|
||||
write_command<chip::CharSpan> *cmd = New<write_command<chip::CharSpan>>(
|
||||
node_id, endpoint_id, DoorLock::Id, attribute_id, chip::CharSpan(language_buf, strlen(language_buf)));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
cmd->set_attribute_free_handler(language_str_free, language_buf);
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
// uint8 value type
|
||||
case DoorLock::Attributes::LEDSettings::Id:
|
||||
case DoorLock::Attributes::SoundVolume::Id:
|
||||
case DoorLock::Attributes::OperatingMode::Id:
|
||||
case DoorLock::Attributes::LocalProgrammingFeatures::Id:
|
||||
case DoorLock::Attributes::WrongCodeEntryLimit::Id:
|
||||
case DoorLock::Attributes::UserCodeTemporaryDisableTime::Id: {
|
||||
write_command<uint8_t> *cmd = New<write_command<uint8_t>>(node_id, endpoint_id, DoorLock::Id, attribute_id,
|
||||
string_to_uint8(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
// boolean value type
|
||||
case DoorLock::Attributes::EnableLocalProgramming::Id:
|
||||
case DoorLock::Attributes::EnableOneTouchLocking::Id:
|
||||
case DoorLock::Attributes::EnableInsideStatusLED::Id:
|
||||
case DoorLock::Attributes::EnablePrivacyModeButton::Id:
|
||||
case DoorLock::Attributes::SendPINOverTheAir::Id:
|
||||
case DoorLock::Attributes::RequirePINforRemoteOperation::Id: {
|
||||
write_command<bool> *cmd = New<write_command<bool>>(node_id, endpoint_id, DoorLock::Id, attribute_id,
|
||||
string_to_bool(attribute_val_str));
|
||||
ESP_RETURN_ON_FALSE(cmd, ESP_ERR_NO_MEM, TAG, "Failed to alloc memory for write_command");
|
||||
err = cmd->send_command();
|
||||
break;
|
||||
}
|
||||
default:
|
||||
err = ESP_ERR_NOT_SUPPORTED;
|
||||
break;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
} // namespace door_lock
|
||||
|
||||
namespace occupancy_sensing {
|
||||
|
||||
static esp_err_t write_attribute(uint64_t node_id, uint16_t endpoint_id, uint32_t attribute_id, char *attribute_val_str)
|
||||
@@ -585,6 +759,7 @@ static esp_err_t write_attribute(uint64_t node_id, uint16_t endpoint_id, uint32_
|
||||
}
|
||||
default:
|
||||
err = ESP_ERR_NOT_SUPPORTED;
|
||||
break;
|
||||
}
|
||||
return err;
|
||||
}
|
||||
@@ -648,14 +823,23 @@ esp_err_t send_write_attr_command(uint64_t node_id, uint16_t endpoint_id, uint32
|
||||
err = clusters::group_key_management::write_attribute(node_id, endpoint_id, attribute_id, attribute_val_str);
|
||||
break;
|
||||
case OccupancySensing::Id:
|
||||
return clusters::occupancy_sensing::write_attribute(node_id, endpoint_id, attribute_id, attribute_val_str);
|
||||
err = clusters::occupancy_sensing::write_attribute(node_id, endpoint_id, attribute_id, attribute_val_str);
|
||||
break;
|
||||
case WindowCovering::Id:
|
||||
return clusters::window_covering::write_attribute(node_id, endpoint_id, attribute_id, attribute_val_str);
|
||||
err = clusters::window_covering::write_attribute(node_id, endpoint_id, attribute_id, attribute_val_str);
|
||||
break;
|
||||
case ThermostatUserInterfaceConfiguration::Id:
|
||||
return clusters::thermostat_userinterface_configuration::write_attribute(node_id, endpoint_id, attribute_id,
|
||||
attribute_val_str);
|
||||
err = clusters::thermostat_userinterface_configuration::write_attribute(node_id, endpoint_id, attribute_id,
|
||||
attribute_val_str);
|
||||
break;
|
||||
case Identify::Id:
|
||||
err = clusters::identify::write_attribute(node_id, endpoint_id, attribute_id, attribute_val_str);
|
||||
break;
|
||||
case Thermostat::Id:
|
||||
err = clusters::thermostat::write_attribute(node_id, endpoint_id, attribute_id, attribute_val_str);
|
||||
break;
|
||||
case DoorLock::Id:
|
||||
err = clusters::door_lock::write_attribute(node_id, endpoint_id, attribute_id, attribute_val_str);
|
||||
break;
|
||||
default:
|
||||
err = ESP_ERR_NOT_SUPPORTED;
|
||||
|
||||
+43
-2
@@ -900,7 +900,26 @@ The ``pairing`` command is used for commissioning the end-devices. Here are thre
|
||||
|
||||
2.4.5.3 Cluster commands
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
The ``invoke-cmd`` command is used for sending cluster commands to the end-devices. Currently the controller only supports commands of on-off, level-control, and color-control clusters. The on-off cluster supports both unicast and multicast sending, and the other two clusters only support unicast sending.
|
||||
The ``invoke-cmd`` command is used for sending cluster commands to the end-devices. Currently the controller component has implemented the following commands for various clusters.
|
||||
|
||||
**Unicast commands**:
|
||||
|
||||
| **OnOff Cluster** (On, Off, Toggle)
|
||||
| **LevelControl Cluster** (Move, MoveToLevel, Step, Stop)
|
||||
| **ColorControl Cluster** (MoveToHue, MoveToSaturation, MoveToHueAndSaturation)
|
||||
| **GroupKeyManagement Cluster** (KeySetWrite, KeySetRead)
|
||||
| **Groups Cluster** (AddGroup, ViewGroup, RemoveGroup)
|
||||
| **Identify Cluster** (Identify, TriggerEffect)
|
||||
| **Scenes Cluster** (AddScene, ViewScene, RemoveScene, RemoveAllScenes, StoreScene, RecallScene, GetSceneMembership)
|
||||
| **Thermostat Cluster** (SetpointRaiseLower, SetWeeklySchedule, GetWeeklySchedule, ClearWeeklySchedule)
|
||||
| **DoorLock Cluster** (LockDoor, UnlockDoor, UnlockWithTimeout)
|
||||
| **WindowCovering Cluster** (UpOrOpen, DownOrClose, StopMotion, GoToLiftValue, GoToLiftPercentage, GoToTiltValue, GoToTiltPercentage)
|
||||
|
||||
**Group commands**:
|
||||
|
||||
| **OnOff Cluster** (On, Off, Toggle)
|
||||
|
||||
If you want to use commands not listed above, you can use ``esp_matter::controller::cluster_command::set_unsupported_cluster_command_handler()`` and ``esp_matter::controller::cluster_command::set_unsupported_cluster_group_command_handler()`` to set handlers for the commands that are not currently implemented.
|
||||
|
||||
- Send the cluster command:
|
||||
|
||||
@@ -932,7 +951,22 @@ The ``read-event`` command is used for sending the commands of reading events on
|
||||
|
||||
2.4.5.6 Write attribute commands
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
The ``write-attr`` command is used for sending the commands of writing attributes on the end-device. Currently the controller only supports unicast-attributes-writing of on-off, level-control, color-control, access-control, binding, and group-key-management clusters.
|
||||
The ``write-attr`` command is used for sending the commands of writing attributes on the end-device. Currently the controller component has implemented the capability to write attributes of the following clusters.
|
||||
|
||||
| **OnOff Cluster**
|
||||
| **LevelControl Cluster**
|
||||
| **ColorControl Cluster**
|
||||
| **AccessControl Cluster**
|
||||
| **Binding Cluster**
|
||||
| **GroupKeyManagement Cluster**
|
||||
| **Identify Cluster**
|
||||
| **Thermostat Cluster**
|
||||
| **DoorLock Cluster**
|
||||
| **OccupancySensing Cluster**
|
||||
| **WindowCovering Cluster**
|
||||
| **ThermostatUserInterfaceConfiguration Cluster**
|
||||
|
||||
If you want to send write-attribute commands to clusters not listed above, you can use ``esp_matter::controller::set_unsupported_attribute_write_handler()`` to set the handler for clusters that are not currently implemented.
|
||||
|
||||
- Send the write-attribute command:
|
||||
|
||||
@@ -940,6 +974,13 @@ The ``write-attr`` command is used for sending the commands of writing attribute
|
||||
|
||||
matter esp controller write-attr <node_id> <endpoint_id> <cluster_id> <attribute_id> <attribute_value>
|
||||
|
||||
Note: ``attribute_value`` can be formatted as a JSON string. For example, for the Binding attribute, you should use the following JSON structure as the ``attribute_value`` : ``"[{\"node\":1, \"endpoint\":1, \"cluster\":6}]"``
|
||||
|
||||
::
|
||||
|
||||
matter esp controller write-attr <node_id> <endpoint_id> 30 0 "[{\"node\":1, \"endpoint\":1, \"cluster\":6}]"
|
||||
|
||||
|
||||
2.4.5.7 Subscribe attribute commands
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
The ``subs-attr`` command is used for sending the commands of subscribing attributes on the end-device.
|
||||
|
||||
Reference in New Issue
Block a user