Mirror of https://github.com/espressif/esp-matter.git (synced 2026-04-27)
data_model_gen: treat clusters with codegen implementation as non-SCI/non-migrated
@@ -0,0 +1,3 @@
+# Collapse generated/auto-generated code in MR diffs (GitLab)
+components/esp_matter/data_model/generated/** gitlab-generated
+components/esp_matter/zap_common/** gitlab-generated

+3 -9
@@ -26,7 +26,6 @@
 #include <unit_localization_ids.h>
 #include <binding.h>
 #include <esp_matter_data_model_priv.h>
-#include <app/ClusterCallbacks.h>
 
 using namespace chip::app::Clusters;
 using chip::app::CommandHandler;
@@ -49,12 +48,11 @@ uint32_t get_id()
     return TemperatureUnit::Id;
 }
 
-esp_err_t add(cluster_t *cluster, config_t *config)
+esp_err_t add(cluster_t *cluster)
 {
     VerifyOrReturnError(cluster, ESP_ERR_INVALID_ARG);
-    VerifyOrReturnError(config, ESP_ERR_INVALID_ARG);
     update_feature_map(cluster, get_id());
-    attribute::create_temperature_unit(cluster, config->temperature_unit);
+    attribute::create_temperature_unit(cluster, 0);
     attribute::create_supported_temperature_units(cluster, NULL, 0, 0);
 
     return ESP_OK;
@@ -68,9 +66,7 @@ attribute_t *create_temperature_unit(cluster_t *cluster, uint8_t value)
 {
     uint32_t feature_map = get_feature_map_value(cluster);
     VerifyOrReturnValue(has_feature(temperature_unit), NULL);
-    attribute_t *attribute = esp_matter::attribute::create(cluster, TemperatureUnit::Id, ATTRIBUTE_FLAG_WRITABLE | ATTRIBUTE_FLAG_NONVOLATILE, esp_matter_enum8(value));
-    esp_matter::attribute::add_bounds(attribute, esp_matter_enum8(0), esp_matter_enum8(2));
-    return attribute;
+    return esp_matter::attribute::create(cluster, TemperatureUnit::Id, ATTRIBUTE_FLAG_WRITABLE | ATTRIBUTE_FLAG_MANAGED_INTERNALLY | ATTRIBUTE_FLAG_NONVOLATILE, esp_matter_enum8(value));
 }
 
 attribute_t *create_supported_temperature_units(cluster_t *cluster, uint8_t *value, uint16_t length, uint16_t count)
@@ -102,8 +98,6 @@ cluster_t *create(endpoint_t *endpoint, config_t *config, uint8_t flags)
         /* Attributes not managed internally */
         global::attribute::create_cluster_revision(cluster, cluster_revision);
 
-        cluster::set_init_and_shutdown_callbacks(cluster, ESPMatterUnitLocalizationClusterServerInitCallback,
-                                                 ESPMatterUnitLocalizationClusterServerShutdownCallback);
     }
 
     return cluster;

+1 -5
@@ -23,12 +23,8 @@ namespace unit_localization {
 
 namespace feature {
 namespace temperature_unit {
-typedef struct config {
-    uint8_t temperature_unit;
-    config() : temperature_unit(0) {}
-} config_t;
 uint32_t get_id();
-esp_err_t add(cluster_t *cluster, config_t *config);
+esp_err_t add(cluster_t *cluster);
 } /* temperature_unit */
 
 } /* feature */

@@ -0,0 +1,41 @@
+{
+    "migrated_cluster": [
+        "access_control",
+        "administrator_commissioning",
+        "basic_information",
+        "binding",
+        "boolean_state_configuration",
+        "chime",
+        "descriptor",
+        "device_energy_management",
+        "diagnostic_logs",
+        "electrical_energy_measurement",
+        "electrical_power_measurement",
+        "ethernet_network_diagnostics",
+        "fixed_label",
+        "general_commissioning",
+        "general_diagnostics",
+        "group_key_management",
+        "groupcast",
+        "icd_management",
+        "identify",
+        "localization_configuration",
+        "network_commissioning",
+        "occupancy_sensor",
+        "operational_credentials",
+        "ota_software_update_provider",
+        "power_topology",
+        "push_av_stream_transport",
+        "resource_monitoring",
+        "scenes",
+        "software_diagnostics",
+        "soil_measurement",
+        "time_format_localization",
+        "time_synchronization",
+        "user_label",
+        "webrtc_transport_provider",
+        "webrtc_transport_requestor",
+        "wifi_network_diagnostics"
+    ],
+    "migrated_cluster_with_codegen_impl": []
+}

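The artifact above is consumed by later stages of the tool. A minimal sketch of how a consumer could use the two lists follows; the helper name and the "artifacts" path argument are illustrative, not part of this commit:

import json
import os


def load_migrated_clusters(artifacts_dir):
    """Read migrated_clusters.json and return the two name sets it holds."""
    with open(os.path.join(artifacts_dir, "migrated_clusters.json"), "r") as f:
        data = json.load(f)
    return set(data["migrated_cluster"]), set(data["migrated_cluster_with_codegen_impl"])


migrated, codegen_impl = load_migrated_clusters("artifacts")
# A cluster is handled by data_model_gen only when it is migrated and does not
# keep a separate codegen implementation.
print("identify" in migrated and "identify" not in codegen_impl)
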
@@ -92,9 +92,15 @@ def find_delegate_server_files(root_dir):
     """
     delegate_server_files = set()
 
-    for dirpath, _, filenames in os.walk(root_dir):
+    for dirpath, dirnames, filenames in os.walk(root_dir):
+        if "codegen" in dirnames:
+            scan_dir = os.path.join(dirpath, "codegen")
+            filenames = os.listdir(scan_dir)
+            dirnames[:] = []
+        else:
+            scan_dir = dirpath
         for filename in filenames:
-            full_path = os.path.join(dirpath, filename)
+            full_path = os.path.join(scan_dir, filename)
 
             # Check for delegate callback in server files
             if "-delegate.h" in filename.lower():

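The reworked walk above relies on os.walk letting callers prune traversal by mutating dirnames in place. A small self-contained sketch of that behaviour, with made-up directory names:

import os


def walked_dirs(root):
    """Return the directories os.walk visits when pruning below any codegen/ dir."""
    visited = []
    for dirpath, dirnames, _ in os.walk(root):
        visited.append(dirpath)
        if "codegen" in dirnames:
            # Clearing the list in place stops os.walk from descending further,
            # so siblings such as tests/ under the same cluster are skipped too.
            dirnames[:] = []
    return visited

For a tree like clusters/thermostat/codegen/ and clusters/thermostat/tests/, the walk visits clusters and clusters/thermostat but neither subdirectory, which is why the changed code lists the codegen/ files explicitly with os.listdir before pruning.
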
@@ -13,7 +13,8 @@
 # limitations under the License.
 import os
 import logging
-from utils.config import FileNames
+import subprocess
+from utils.config import ARTIFACTS_DIR, FileNames
 from utils.helper import write_to_file
 from utils.exceptions import ConfigurationError, CodeGenerationError
 from chip_source_deps.cluster_mapping import normalize_cluster_name
@@ -29,49 +30,70 @@ from chip_source_deps.internally_managed_attributes import (
 logger = logging.getLogger(__name__)
 
 
-def generate_migrated_clusters(
-    root_dir, migrated_clusters_json_file_path
-) -> tuple[bool, str]:
-    """Find all clusters that have a CodegenIntegration.cpp file
+def _find_migrated_cluster_dirs(root_dir: str) -> set[str]:
+    """Return the set of top-level cluster directories under ``root_dir`` that
+    contain a header declaring a class inheriting ``DefaultServerCluster``.
+    """
+    result = subprocess.run(
+        ["grep", "-rl", "--include=*.h", "public DefaultServerCluster", root_dir],
+        check=False,
+        capture_output=True,
+        text=True,
+    )
+    if result.returncode >= 2:
+        raise RuntimeError(f"grep failed ({result.returncode}): {result.stderr}")
+
+    abs_root = os.path.abspath(root_dir)
+    cluster_dirs: set[str] = set()
+    for header_path in result.stdout.splitlines():
+        rel = os.path.relpath(header_path, abs_root)
+        top = rel.split(os.sep, 1)[0]
+        if top and top != "..":
+            cluster_dirs.add(os.path.join(abs_root, top))
+    return cluster_dirs
+
+
+def generate_migrated_clusters(root_dir) -> tuple[bool, str]:
+    """Find all migrated clusters and write them to ``Artifacts/migrated_clusters.json``.
+
+    The JSON holds two lists:
+    - ``migrated_cluster``: clusters using ``DefaultServerCluster`` at the
+      cluster top level; these feed data_model_gen.
+    - ``migrated_cluster_with_codegen_impl``: clusters that own a ``codegen/``
+      subdir; they keep their legacy implementation as is and are skipped by data_model_gen.
 
     Args:
-        root_dir: The root directory to search in
-        migrated_clusters_json_file_path: Path to save the migrated clusters list
+        root_dir: The root directory to search in.
     Returns:
         True if successful, False otherwise
     """
-    migrated_clusters = []
+    migrated_clusters = set()
+    migrated_clusters_with_codegen_impl = set()
 
     try:
-        for dirpath, _, filenames in os.walk(root_dir):
-            for filename in filenames:
-                if not filename.endswith(".cpp") and not filename.endswith(".h"):
-                    continue
-                if (
-                    filename.lower() == "codegenintegration.cpp"
-                    or filename.lower() == "codegeninstance.cpp"
-                ):
-                    cluster_name = os.path.basename(dirpath)
-                    migrated_clusters.append(normalize_cluster_name(cluster_name))
-                else:
-                    with open(
-                        os.path.join(dirpath, filename), "r", encoding="utf-8"
-                    ) as file:
-                        if "DefaultServerCluster" in file.read():
-                            cluster_name = os.path.basename(dirpath)
-                            migrated_clusters.append(
-                                normalize_cluster_name(cluster_name)
-                            )
+        for cluster_dir in _find_migrated_cluster_dirs(root_dir):
+            cluster_name = normalize_cluster_name(os.path.basename(cluster_dir))
+            if not cluster_name:
+                continue
+            if os.path.isdir(os.path.join(cluster_dir, "codegen")):
+                migrated_clusters_with_codegen_impl.add(cluster_name)
+            else:
+                migrated_clusters.add(cluster_name)
 
-        migrated_clusters.sort()
+        payload = {
+            "migrated_cluster": sorted(migrated_clusters),
+            "migrated_cluster_with_codegen_impl": sorted(
+                migrated_clusters_with_codegen_impl
+            ),
+        }
 
-        if write_to_file(migrated_clusters_json_file_path, migrated_clusters, "json"):
-            logger.info(
-                f"Successfully written Migrated Clusters to {migrated_clusters_json_file_path}"
-            )
+        os.makedirs(ARTIFACTS_DIR, exist_ok=True)
+        artifact_path = os.path.join(ARTIFACTS_DIR, FileNames.MIGRATED_CLUSTERS.value)
+
+        if write_to_file(artifact_path, payload, "json"):
+            logger.info(f"Successfully written Migrated Clusters to {artifact_path}")
             return True, None
-        else:
-            return False, f"Error writing to {migrated_clusters_json_file_path}"
+        return False, f"Error writing to {artifact_path}"
     except Exception as e:
         return False, f"Error generating migrated clusters: {str(e)}"

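Two details of the new helper are easy to miss. grep exits with status 1 when it simply finds no matches and only with 2 or higher on a real error, which is why the call passes check=False and raises only for returncode >= 2. The path handling then keeps just the first component below root_dir; a worked example with hypothetical POSIX paths:

import os

abs_root = "/src/connectedhomeip/src/app/clusters"       # hypothetical root_dir
header_path = abs_root + "/chime/chime-server.h"         # hypothetical grep hit
rel = os.path.relpath(header_path, abs_root)              # "chime/chime-server.h"
top = rel.split(os.sep, 1)[0]                             # "chime"
print(os.path.join(abs_root, top))                        # ".../app/clusters/chime"
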
@@ -133,9 +155,6 @@ def generate_requirements(esp_matter_path, output_dir):
         FileNames.ZAP_FILTER_LIST: os.path.join(
             output_dir, FileNames.ZAP_FILTER_LIST.value
         ),
-        FileNames.MIGRATED_CLUSTERS: os.path.join(
-            output_dir, FileNames.MIGRATED_CLUSTERS.value
-        ),
     }
 
     logger.debug(
@@ -199,13 +218,11 @@ def generate_requirements(esp_matter_path, output_dir):
     )
 
     logger.debug("Finding migrated clusters...")
-    is_generated, error_message = generate_migrated_clusters(
-        root_cluster_server_dir, file_paths[FileNames.MIGRATED_CLUSTERS]
-    )
+    is_generated, error_message = generate_migrated_clusters(root_cluster_server_dir)
     if not is_generated:
         raise CodeGenerationError(
             error_message,
-            file_path=file_paths[FileNames.MIGRATED_CLUSTERS],
+            file_path=os.path.join(ARTIFACTS_DIR, FileNames.MIGRATED_CLUSTERS.value),
             context="generate_requirements",
             suggestion=f"Check {root_cluster_server_dir} and write permissions.",
         )

@@ -36,6 +36,10 @@ COLORED_FORMATTER = ColoredFormatter(
 DEFAULT_OUTPUT_DIR = "out"
 DEFAULT_CHIP_VERSION = "1.5"
 
+ARTIFACTS_DIR = os.path.join(
+    os.path.dirname(os.path.dirname(os.path.abspath(__file__))), "artifacts"
+)
+
 SPECIFICATION_VERSIONS = ["1.1", "1.2", "1.3", "1.4", "1.4.2", "1.5", "1.6"]
 
 ALLOW_PROVISIONAL = False

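ARTIFACTS_DIR resolves relative to config.py itself rather than to the per-run output directory, so the generated migrated_clusters.json sits alongside the tool. A quick sketch of the path arithmetic, using a made-up location for config.py:

import os

fake_config_path = "/repo/tools/data_model_gen/utils/config.py"   # hypothetical
artifacts_dir = os.path.join(
    os.path.dirname(os.path.dirname(os.path.abspath(fake_config_path))), "artifacts"
)
print(artifacts_dir)  # /repo/tools/data_model_gen/artifacts
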
@@ -19,22 +19,24 @@ Loads JSON artifacts once per run to avoid repeated file I/O.
 import json
 import os
 
-from utils.config import FileNames
+from utils.config import ARTIFACTS_DIR, FileNames
 from utils.conversion_utils import hex_to_int
 
 
 def load_cluster_parse_context(output_dir: str):
     """Load all cluster-related metadata from output_dir into a single context."""
 
-    def _load_json(filename: str):
-        path = os.path.join(output_dir, filename)
+    def _load_json(filename: str, base_dir: str = output_dir):
+        path = os.path.join(base_dir, filename)
         with open(path, "r") as f:
             return json.load(f)
 
     return ClusterParseContext(
         delegate_clusters=_load_json(FileNames.DELEGATE_CLUSTERS.value),
         plugin_init_cb_clusters=_load_json(FileNames.PLUGIN_INIT_CB_CLUSTERS.value),
-        migrated_clusters=_load_json(FileNames.MIGRATED_CLUSTERS.value),
+        migrated_clusters=_load_json(
+            FileNames.MIGRATED_CLUSTERS.value, base_dir=ARTIFACTS_DIR
+        )["migrated_cluster"],
         zap_filter_list=_load_json(FileNames.ZAP_FILTER_LIST.value),
         internally_managed_attributes=_load_json(
             FileNames.INTERNALLY_MANAGED_ATTRIBUTES.value

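The loader keeps its existing call sites intact by giving _load_json a base_dir parameter that defaults to the enclosing output_dir; only the migrated-clusters artifact overrides it. A stripped-down sketch of that pattern, with hypothetical file names:

import json
import os


def load_context(output_dir, artifacts_dir):
    def _load_json(filename, base_dir=output_dir):
        # The default binds to output_dir when the inner function is defined,
        # so unchanged callers keep reading from the run's output directory.
        with open(os.path.join(base_dir, filename), "r") as f:
            return json.load(f)

    zap_filter = _load_json("zap_filter_list.json")                          # from output_dir
    migrated = _load_json("migrated_clusters.json", base_dir=artifacts_dir)  # from artifacts_dir
    return zap_filter, migrated["migrated_cluster"]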