diff --git a/examples/all-clusters-app/esp32/main/AppTask.cpp b/examples/all-clusters-app/esp32/main/AppTask.cpp
index 2111d7cb32df47..9a04b23c689d4c 100644
--- a/examples/all-clusters-app/esp32/main/AppTask.cpp
+++ b/examples/all-clusters-app/esp32/main/AppTask.cpp
@@ -161,6 +161,13 @@ void AppTask::ActionCompleted(BoltLockManager::Action_t aAction)
     }
 }
 
+CHIP_ERROR AppTask::LockInit()
+{
+    ReturnErrorOnFailure(BoltLockMgr().InitLockState());
+    BoltLockMgr().SetCallbacks(ActionInitiated, ActionCompleted);
+    return CHIP_NO_ERROR;
+}
+
 CHIP_ERROR AppTask::Init()
 {
     /* Print chip information */
@@ -179,10 +186,7 @@ CHIP_ERROR AppTask::Init()
                                   (void *) this,    // init timer id = app task obj context
                                   TimerEventHandler // timer callback handler
     );
-
-    CHIP_ERROR err = BoltLockMgr().InitLockState();
-
-    BoltLockMgr().SetCallbacks(ActionInitiated, ActionCompleted);
+    VerifyOrReturnError(sFunctionTimer != NULL, CHIP_ERROR_NO_MEMORY, ESP_LOGE(TAG, "Failed to create function selection timer"));
 
     statusLED1.Init(STATUS_LED_GPIO_NUM);
     // Our second LED doesn't map to any physical LEDs so far, just to virtual
@@ -199,7 +203,7 @@ CHIP_ERROR AppTask::Init()
     InitDeviceDisplay();
 #endif
 
-    return err;
+    return CHIP_NO_ERROR;
 }
 
 void AppTask::AppTaskMain(void * pvParameter)
diff --git a/examples/all-clusters-app/esp32/main/include/AppTask.h b/examples/all-clusters-app/esp32/main/include/AppTask.h
index 8a72b2d47e5777..779e7e990c12a7 100644
--- a/examples/all-clusters-app/esp32/main/include/AppTask.h
+++ b/examples/all-clusters-app/esp32/main/include/AppTask.h
@@ -39,6 +39,7 @@ class AppTask
     void PostEvent(const AppEvent * event);
     void ButtonEventHandler(uint8_t btnIdx, uint8_t btnAction);
     static void ButtonPressedAction(AppEvent * aEvent);
+    CHIP_ERROR LockInit();
 
 private:
     CHIP_ERROR Init();
diff --git a/examples/all-clusters-app/esp32/main/main.cpp b/examples/all-clusters-app/esp32/main/main.cpp
index 1447638bcce0b1..62563025279cbf 100644
--- a/examples/all-clusters-app/esp32/main/main.cpp
+++ b/examples/all-clusters-app/esp32/main/main.cpp
@@ -115,6 +115,13 @@ static void InitServer(intptr_t context)
     emberAfEndpointEnableDisable(kNetworkCommissioningEndpointSecondary, false);
     InitBindingHandlers();
+
+    CHIP_ERROR err = GetAppTask().LockInit();
+    if (err != CHIP_NO_ERROR)
+    {
+        ESP_LOGE(TAG, "Failed to initialize app task lock, err:%" CHIP_ERROR_FORMAT, err.Format());
+    }
+
 #if CONFIG_DEVICE_TYPE_M5STACK
     SetupPretendDevices();
 #endif
diff --git a/examples/platform/esp32/lock/BoltLockManager.cpp b/examples/platform/esp32/lock/BoltLockManager.cpp
index 48c355f24eaca4..6115c15b1bf75d 100644
--- a/examples/platform/esp32/lock/BoltLockManager.cpp
+++ b/examples/platform/esp32/lock/BoltLockManager.cpp
@@ -764,7 +764,6 @@ CHIP_ERROR BoltLockManager::InitLockState()
     // Initial lock state
     chip::app::DataModel::Nullable<chip::app::Clusters::DoorLock::DlLockState> state;
     chip::EndpointId endpointId{ 1 };
-    chip::DeviceLayer::PlatformMgr().LockChipStack();
     chip::app::Clusters::DoorLock::Attributes::LockState::Get(endpointId, state);
 
     uint8_t numberOfCredentialsPerUser = 0;
@@ -816,8 +815,6 @@ CHIP_ERROR BoltLockManager::InitLockState()
         numberOfHolidaySchedules = 10;
     }
 
-    chip::DeviceLayer::PlatformMgr().UnlockChipStack();
-
     CHIP_ERROR err = BoltLockMgr().Init(state,
                                         ParamBuilder()
                                             .SetNumberOfUsers(numberOfUsers)
diff --git a/src/python_testing/TC_RR_1_1.py b/src/python_testing/TC_RR_1_1.py
index d22b0aca8362bb..18abbbfa759063 100644
--- a/src/python_testing/TC_RR_1_1.py
+++ b/src/python_testing/TC_RR_1_1.py
@@ -86,23 +86,7 @@ async def test_TC_RR_1_1(self):
         # Do a read-out of heap statistics before the test begins
         if check_heap_watermarks:
             logging.info("Read Heap info before stress test")
-
-            diagnostics_contents = [
-                Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark,
-                Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed,
-            ]
-            diagnostics_paths = [(0, attrib) for attrib in diagnostics_contents]
-            swdiag_info = await dev_ctrl.ReadAttribute(self.dut_node_id, diagnostics_paths)
-
-            # Make sure everything came back from the read that we expected
-            asserts.assert_true(0 in swdiag_info.keys(), "Must have read endpoint 0 data")
-            asserts.assert_true(Clusters.SoftwareDiagnostics in swdiag_info[0].keys(
-            ), "Must have read Software Diagnostics cluster data")
-            for attribute in diagnostics_contents:
-                asserts.assert_true(attribute in swdiag_info[0][Clusters.SoftwareDiagnostics],
-                                    "Must have read back attribute %s" % (attribute.__name__))
-            high_watermark_before = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark]
-            current_usage_before = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed]
+            high_watermark_before, current_usage_before = await self.read_heap_usage(dev_ctrl)
 
         # Make sure all certificates are installed with maximal size
         dev_ctrl.fabricAdmin.certificateAuthority.maximizeCertChains = True
@@ -432,23 +416,7 @@ async def test_TC_RR_1_1(self):
         # Read heap watermarks after the test
         if check_heap_watermarks:
             logging.info("Read Heap info after stress test")
-
-            diagnostics_contents = [
-                Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark,
-                Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed,
-            ]
-            diagnostics_paths = [(0, attrib) for attrib in diagnostics_contents]
-            swdiag_info = await dev_ctrl.ReadAttribute(self.dut_node_id, diagnostics_paths)
-
-            # Make sure everything came back from the read that we expected
-            asserts.assert_true(0 in swdiag_info.keys(), "Must have read endpoint 0 data")
-            asserts.assert_true(Clusters.SoftwareDiagnostics in swdiag_info[0].keys(
-            ), "Must have read Software Diagnostics cluster data")
-            for attribute in diagnostics_contents:
-                asserts.assert_true(attribute in swdiag_info[0][Clusters.SoftwareDiagnostics],
-                                    "Must have read back attribute %s" % (attribute.__name__))
-            high_watermark_after = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark]
-            current_usage_after = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed]
+            high_watermark_after, current_usage_after = await self.read_heap_usage(dev_ctrl)
             logging.info("=== Heap Usage Diagnostics ===\nHigh watermark: {} (before) / {} (after)\n"
                          "Current usage: {} (before) / {} (after)".format(high_watermark_before, high_watermark_after,
                                                                           current_usage_before, current_usage_after))
@@ -666,7 +634,7 @@ def build_acl(self, fabric_number, client_by_name, num_controllers_per_fabric):
         #   - Subjects field: [0x3000_0000_0000_0001, 0x3000_0000_0000_0002, 0x3000_0000_0000_0003, 0x3000_0000_0000_0004]
         #   - Targets field: [{Cluster: 0xFFF1_FC40, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC41, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC02, DeviceType: 0xFFF1_FC42}]
         # . struct
-        #   - Privilege field: View (3)
+        #   - Privilege field: View (1)
         #   - AuthMode field: CASE (2)
         #   - Subjects field: [0x4000_0000_0000_0001, 0x4000_0000_0000_0002, 0x4000_0000_0000_0003, 0x4000_0000_0000_0004]
         #   - Targets field: [{Cluster: 0xFFF1_FC80, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC81, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC82, DeviceType: 0xFFF1_FC22}]
@@ -748,6 +716,25 @@ def build_group_key(self, fabric_index: int, group_key_index: int, keys_per_fabr
                                                                epochKey2=self.random_string(16).encode(),
                                                                epochStartTime2=(set_id * 4 + 2))
 
+    async def read_heap_usage(self, dev_ctrl):
+        diagnostics_contents = [
+            Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark,
+            Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed,
+        ]
+        diagnostics_paths = [(0, attrib) for attrib in diagnostics_contents]
+        swdiag_info = await dev_ctrl.ReadAttribute(self.dut_node_id, diagnostics_paths)
+
+        # Make sure everything came back from the read that we expected
+        asserts.assert_true(0 in swdiag_info.keys(), "Must have read endpoint 0 data")
+        asserts.assert_true(Clusters.SoftwareDiagnostics in swdiag_info[0].keys(
+        ), "Must have read Software Diagnostics cluster data")
+        for attribute in diagnostics_contents:
+            asserts.assert_true(attribute in swdiag_info[0][Clusters.SoftwareDiagnostics],
+                                "Must have read back attribute %s" % (attribute.__name__))
+        high_watermark = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark]
+        current_usage = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed]
+        return high_watermark, current_usage
+
 
 if __name__ == "__main__":
     default_matter_test_main(maximize_cert_chains=True, controller_cat_tags=[0x0001_0001])