Merge branch 'skip-codegen-impl-clusters' into 'main'

Skip cluster init/shutdown callback generation for clusters using codegen/ implementations

See merge request app-frameworks/esp-matter!1515
This commit is contained in:
Hrishikesh Dhayagude
2026-04-24 13:16:52 +08:00
8 changed files with 124 additions and 61 deletions
+3
View File
@@ -0,0 +1,3 @@
# Collapse generated/auto-generated code in MR diffs (GitLab)
components/esp_matter/data_model/generated/** gitlab-generated
components/esp_matter/zap_common/** gitlab-generated
@@ -26,7 +26,6 @@
#include <unit_localization_ids.h>
#include <binding.h>
#include <esp_matter_data_model_priv.h>
#include <app/ClusterCallbacks.h>
using namespace chip::app::Clusters;
using chip::app::CommandHandler;
@@ -49,12 +48,11 @@ uint32_t get_id()
return TemperatureUnit::Id;
}
esp_err_t add(cluster_t *cluster, config_t *config)
esp_err_t add(cluster_t *cluster)
{
VerifyOrReturnError(cluster, ESP_ERR_INVALID_ARG);
VerifyOrReturnError(config, ESP_ERR_INVALID_ARG);
update_feature_map(cluster, get_id());
attribute::create_temperature_unit(cluster, config->temperature_unit);
attribute::create_temperature_unit(cluster, 0);
attribute::create_supported_temperature_units(cluster, NULL, 0, 0);
return ESP_OK;
@@ -68,9 +66,7 @@ attribute_t *create_temperature_unit(cluster_t *cluster, uint8_t value)
{
uint32_t feature_map = get_feature_map_value(cluster);
VerifyOrReturnValue(has_feature(temperature_unit), NULL);
attribute_t *attribute = esp_matter::attribute::create(cluster, TemperatureUnit::Id, ATTRIBUTE_FLAG_WRITABLE | ATTRIBUTE_FLAG_NONVOLATILE, esp_matter_enum8(value));
esp_matter::attribute::add_bounds(attribute, esp_matter_enum8(0), esp_matter_enum8(2));
return attribute;
return esp_matter::attribute::create(cluster, TemperatureUnit::Id, ATTRIBUTE_FLAG_WRITABLE | ATTRIBUTE_FLAG_MANAGED_INTERNALLY | ATTRIBUTE_FLAG_NONVOLATILE, esp_matter_enum8(value));
}
attribute_t *create_supported_temperature_units(cluster_t *cluster, uint8_t *value, uint16_t length, uint16_t count)
@@ -102,8 +98,6 @@ cluster_t *create(endpoint_t *endpoint, config_t *config, uint8_t flags)
/* Attributes not managed internally */
global::attribute::create_cluster_revision(cluster, cluster_revision);
cluster::set_init_and_shutdown_callbacks(cluster, ESPMatterUnitLocalizationClusterServerInitCallback,
ESPMatterUnitLocalizationClusterServerShutdownCallback);
}
return cluster;
@@ -23,12 +23,8 @@ namespace unit_localization {
namespace feature {
namespace temperature_unit {
typedef struct config {
uint8_t temperature_unit;
config() : temperature_unit(0) {}
} config_t;
uint32_t get_id();
esp_err_t add(cluster_t *cluster, config_t *config);
esp_err_t add(cluster_t *cluster);
} /* temperature_unit */
} /* feature */
@@ -0,0 +1,41 @@
{
"migrated_cluster": [
"access_control",
"administrator_commissioning",
"basic_information",
"binding",
"boolean_state_configuration",
"chime",
"descriptor",
"device_energy_management",
"diagnostic_logs",
"electrical_energy_measurement",
"electrical_power_measurement",
"ethernet_network_diagnostics",
"fixed_label",
"general_commissioning",
"general_diagnostics",
"group_key_management",
"groupcast",
"icd_management",
"identify",
"localization_configuration",
"network_commissioning",
"occupancy_sensor",
"operational_credentials",
"ota_software_update_provider",
"power_topology",
"push_av_stream_transport",
"resource_monitoring",
"scenes",
"software_diagnostics",
"soil_measurement",
"time_format_localization",
"time_synchronization",
"user_label",
"webrtc_transport_provider",
"webrtc_transport_requestor",
"wifi_network_diagnostics"
],
"migrated_cluster_with_codegen_impl": []
}
@@ -92,9 +92,15 @@ def find_delegate_server_files(root_dir):
"""
delegate_server_files = set()
for dirpath, _, filenames in os.walk(root_dir):
for dirpath, dirnames, filenames in os.walk(root_dir):
if "codegen" in dirnames:
scan_dir = os.path.join(dirpath, "codegen")
filenames = os.listdir(scan_dir)
dirnames[:] = []
else:
scan_dir = dirpath
for filename in filenames:
full_path = os.path.join(dirpath, filename)
full_path = os.path.join(scan_dir, filename)
# Check for delegate callback in server files
if "-delegate.h" in filename.lower():
+56 -39
View File
@@ -13,7 +13,8 @@
# limitations under the License.
import os
import logging
from utils.config import FileNames
import subprocess
from utils.config import ARTIFACTS_DIR, FileNames
from utils.helper import write_to_file
from utils.exceptions import ConfigurationError, CodeGenerationError
from chip_source_deps.cluster_mapping import normalize_cluster_name
@@ -29,49 +30,70 @@ from chip_source_deps.internally_managed_attributes import (
logger = logging.getLogger(__name__)
def generate_migrated_clusters(
root_dir, migrated_clusters_json_file_path
) -> tuple[bool, str]:
"""Find all clusters that have a CodegenIntegration.cpp file
def _find_migrated_cluster_dirs(root_dir: str) -> set[str]:
"""Return the set of top-level cluster directories under ``root_dir`` that
contain a header declaring a class inheriting ``DefaultServerCluster``.
"""
result = subprocess.run(
["grep", "-rl", "--include=*.h", "public DefaultServerCluster", root_dir],
check=False,
capture_output=True,
text=True,
)
if result.returncode >= 2:
raise RuntimeError(f"grep failed ({result.returncode}): {result.stderr}")
abs_root = os.path.abspath(root_dir)
cluster_dirs: set[str] = set()
for header_path in result.stdout.splitlines():
rel = os.path.relpath(header_path, abs_root)
top = rel.split(os.sep, 1)[0]
if top and top != "..":
cluster_dirs.add(os.path.join(abs_root, top))
return cluster_dirs
def generate_migrated_clusters(root_dir) -> tuple[bool, str]:
    """Find all migrated clusters and write them to ``artifacts/migrated_clusters.json``.

    The JSON holds two lists:
    - ``migrated_cluster``: clusters using ``DefaultServerCluster`` at the
      cluster top level; these feed data_model_gen.
    - ``migrated_cluster_with_codegen_impl``: clusters that own a ``codegen/``
      subdir; they keep their legacy implementation as-is and are skipped by
      data_model_gen.

    Args:
        root_dir: The root directory to search in.

    Returns:
        Tuple ``(success, error_message)``; ``error_message`` is ``None``
        when ``success`` is ``True``.
    """
    migrated_clusters = set()
    migrated_clusters_with_codegen_impl = set()
    try:
        for cluster_dir in _find_migrated_cluster_dirs(root_dir):
            cluster_name = normalize_cluster_name(os.path.basename(cluster_dir))
            if not cluster_name:
                continue
            # A codegen/ subdir marks a cluster that retains its legacy
            # implementation; record it separately so data_model_gen can
            # skip init/shutdown callback generation for it.
            if os.path.isdir(os.path.join(cluster_dir, "codegen")):
                migrated_clusters_with_codegen_impl.add(cluster_name)
            else:
                migrated_clusters.add(cluster_name)
        # Sort both lists for a stable, diff-friendly artifact.
        payload = {
            "migrated_cluster": sorted(migrated_clusters),
            "migrated_cluster_with_codegen_impl": sorted(
                migrated_clusters_with_codegen_impl
            ),
        }
        os.makedirs(ARTIFACTS_DIR, exist_ok=True)
        artifact_path = os.path.join(ARTIFACTS_DIR, FileNames.MIGRATED_CLUSTERS.value)
        if write_to_file(artifact_path, payload, "json"):
            logger.info(f"Successfully written Migrated Clusters to {artifact_path}")
            return True, None
        else:
            return False, f"Error writing to {artifact_path}"
    except Exception as e:
        return False, f"Error generating migrated clusters: {str(e)}"
@@ -133,9 +155,6 @@ def generate_requirements(esp_matter_path, output_dir):
FileNames.ZAP_FILTER_LIST: os.path.join(
output_dir, FileNames.ZAP_FILTER_LIST.value
),
FileNames.MIGRATED_CLUSTERS: os.path.join(
output_dir, FileNames.MIGRATED_CLUSTERS.value
),
}
logger.debug(
@@ -199,13 +218,11 @@ def generate_requirements(esp_matter_path, output_dir):
)
logger.debug("Finding migrated clusters...")
is_generated, error_message = generate_migrated_clusters(
root_cluster_server_dir, file_paths[FileNames.MIGRATED_CLUSTERS]
)
is_generated, error_message = generate_migrated_clusters(root_cluster_server_dir)
if not is_generated:
raise CodeGenerationError(
error_message,
file_path=file_paths[FileNames.MIGRATED_CLUSTERS],
file_path=os.path.join(ARTIFACTS_DIR, FileNames.MIGRATED_CLUSTERS.value),
context="generate_requirements",
suggestion=f"Check {root_cluster_server_dir} and write permissions.",
)
+4
View File
@@ -36,6 +36,10 @@ COLORED_FORMATTER = ColoredFormatter(
DEFAULT_OUTPUT_DIR = "out"
DEFAULT_CHIP_VERSION = "1.5"
# Generated artifacts live in "artifacts/" two levels above this module
# (i.e. at the package root).
_PKG_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
ARTIFACTS_DIR = os.path.join(_PKG_ROOT, "artifacts")
SPECIFICATION_VERSIONS = ["1.1", "1.2", "1.3", "1.4", "1.4.2", "1.5", "1.6"]
ALLOW_PROVISIONAL = False
@@ -19,22 +19,24 @@ Loads JSON artifacts once per run to avoid repeated file I/O.
import json
import os
from utils.config import FileNames
from utils.config import ARTIFACTS_DIR, FileNames
from utils.conversion_utils import hex_to_int
def load_cluster_parse_context(output_dir: str):
"""Load all cluster-related metadata from output_dir into a single context."""
def _load_json(filename: str):
path = os.path.join(output_dir, filename)
def _load_json(filename: str, base_dir: str = output_dir):
path = os.path.join(base_dir, filename)
with open(path, "r") as f:
return json.load(f)
return ClusterParseContext(
delegate_clusters=_load_json(FileNames.DELEGATE_CLUSTERS.value),
plugin_init_cb_clusters=_load_json(FileNames.PLUGIN_INIT_CB_CLUSTERS.value),
migrated_clusters=_load_json(FileNames.MIGRATED_CLUSTERS.value),
migrated_clusters=_load_json(
FileNames.MIGRATED_CLUSTERS.value, base_dir=ARTIFACTS_DIR
)["migrated_cluster"],
zap_filter_list=_load_json(FileNames.ZAP_FILTER_LIST.value),
internally_managed_attributes=_load_json(
FileNames.INTERNALLY_MANAGED_ATTRIBUTES.value