Fix the boot loop crash on M5Stack (#24607)
* Fix the boot loop crash on M5Stack

This is a regression from #24547, which accessed the attributes before the
Server was ready. Moved the lock initialization to after the server is initialized.

* Fix the typo in the CHIP error format
shubhamdp authored and lucicop committed Jan 24, 2023
1 parent 5e46313 commit c04e7b8
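
At a glance, the fix moves the initial LockState read out of AppTask::Init() (which runs before the server exists) and into the InitServer() callback. A minimal sketch of the corrected ordering, assuming the usual ESP32 example scaffolding around this diff (platform bring-up elided; ScheduleWork is the pattern these examples use to defer work onto the Matter event loop task):

    // Sketch only: NVS/DeviceLayer setup and the rest of InitServer are elided.
    static void InitServer(intptr_t context)
    {
        // chip::Server and the ember attribute store are initialized here,
        // so reading DoorLock::LockState is now safe.
        CHIP_ERROR err = GetAppTask().LockInit();
        if (err != CHIP_NO_ERROR)
        {
            ESP_LOGE(TAG, "Failed to initialize app task lock, err:%" CHIP_ERROR_FORMAT, err.Format());
        }
    }

    extern "C" void app_main()
    {
        // ... platform bring-up elided ...
        // Defer server-dependent work onto the Matter event loop task.
        chip::DeviceLayer::PlatformMgr().ScheduleWork(InitServer, reinterpret_cast<intptr_t>(nullptr));
    }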
Showing 5 changed files with 39 additions and 43 deletions.
14 changes: 9 additions & 5 deletions examples/all-clusters-app/esp32/main/AppTask.cpp
@@ -161,6 +161,13 @@ void AppTask::ActionCompleted(BoltLockManager::Action_t aAction)
     }
 }
 
+CHIP_ERROR AppTask::LockInit()
+{
+    ReturnErrorOnFailure(BoltLockMgr().InitLockState());
+    BoltLockMgr().SetCallbacks(ActionInitiated, ActionCompleted);
+    return CHIP_NO_ERROR;
+}
+
 CHIP_ERROR AppTask::Init()
 {
     /* Print chip information */
@@ -179,10 +186,7 @@ CHIP_ERROR AppTask::Init()
                                   (void *) this,    // init timer id = app task obj context
                                   TimerEventHandler // timer callback handler
     );
-
-    CHIP_ERROR err = BoltLockMgr().InitLockState();
-
-    BoltLockMgr().SetCallbacks(ActionInitiated, ActionCompleted);
+
     VerifyOrReturnError(sFunctionTimer != NULL, CHIP_ERROR_NO_MEMORY, ESP_LOGE(TAG, "Failed to create function selection timer"));
 
     statusLED1.Init(STATUS_LED_GPIO_NUM);
     // Our second LED doesn't map to any physical LEDs so far, just to virtual
@@ -199,7 +203,7 @@ CHIP_ERROR AppTask::Init()
     InitDeviceDisplay();
 #endif
 
-    return err;
+    return CHIP_NO_ERROR;
 }
 
 void AppTask::AppTaskMain(void * pvParameter)
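
For context, ReturnErrorOnFailure in the new LockInit() is the early-return macro from src/lib/support/CodeUtils.h; hand-expanded, the function above is equivalent to this sketch:

    CHIP_ERROR AppTask::LockInit()
    {
        CHIP_ERROR err = BoltLockMgr().InitLockState();
        if (err != CHIP_NO_ERROR)
        {
            return err; // what ReturnErrorOnFailure(...) does on failure
        }
        BoltLockMgr().SetCallbacks(ActionInitiated, ActionCompleted);
        return CHIP_NO_ERROR;
    }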
1 change: 1 addition & 0 deletions examples/all-clusters-app/esp32/main/include/AppTask.h
@@ -39,6 +39,7 @@ class AppTask
     void PostEvent(const AppEvent * event);
     void ButtonEventHandler(uint8_t btnIdx, uint8_t btnAction);
     static void ButtonPressedAction(AppEvent * aEvent);
+    CHIP_ERROR LockInit();
 
 private:
     CHIP_ERROR Init();
7 changes: 7 additions & 0 deletions examples/all-clusters-app/esp32/main/main.cpp
@@ -115,6 +115,13 @@ static void InitServer(intptr_t context)
     emberAfEndpointEnableDisable(kNetworkCommissioningEndpointSecondary, false);
 
     InitBindingHandlers();
+
+    CHIP_ERROR err = GetAppTask().LockInit();
+    if (err != CHIP_NO_ERROR)
+    {
+        ESP_LOGE(TAG, "Failed to initialize app task lock, err:%" CHIP_ERROR_FORMAT, err.Format());
+    }
+
 #if CONFIG_DEVICE_TYPE_M5STACK
     SetupPretendDevices();
 #endif
3 changes: 0 additions & 3 deletions examples/platform/esp32/lock/BoltLockManager.cpp
@@ -764,7 +764,6 @@ CHIP_ERROR BoltLockManager::InitLockState()
     // Initial lock state
     chip::app::DataModel::Nullable<chip::app::Clusters::DoorLock::DlLockState> state;
     chip::EndpointId endpointId{ 1 };
-    chip::DeviceLayer::PlatformMgr().LockChipStack();
     chip::app::Clusters::DoorLock::Attributes::LockState::Get(endpointId, state);
 
     uint8_t numberOfCredentialsPerUser = 0;
@@ -816,8 +815,6 @@ CHIP_ERROR BoltLockManager::InitLockState()
         numberOfHolidaySchedules = 10;
     }
 
-    chip::DeviceLayer::PlatformMgr().UnlockChipStack();
-
     CHIP_ERROR err = BoltLockMgr().Init(state,
                                         ParamBuilder()
                                             .SetNumberOfUsers(numberOfUsers)
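
The Lock/UnlockChipStack pair appears to be droppable because InitLockState() is now reached from InitServer(), which runs on the Matter task. A hedged sketch of the underlying rule, reusing the names from the removed lines, for code that still runs on some other task:

    // Attribute accessors must run on the Matter task or under the platform lock.
    // If some other FreeRTOS task still needed this read, it would look like:
    chip::app::DataModel::Nullable<chip::app::Clusters::DoorLock::DlLockState> state;
    chip::EndpointId endpointId{ 1 };

    chip::DeviceLayer::PlatformMgr().LockChipStack();
    chip::app::Clusters::DoorLock::Attributes::LockState::Get(endpointId, state);
    chip::DeviceLayer::PlatformMgr().UnlockChipStack();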
57 changes: 22 additions & 35 deletions src/python_testing/TC_RR_1_1.py
@@ -86,23 +86,7 @@ async def test_TC_RR_1_1(self):
         # Do a read-out of heap statistics before the test begins
         if check_heap_watermarks:
             logging.info("Read Heap info before stress test")
-
-            diagnostics_contents = [
-                Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark,
-                Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed,
-            ]
-            diagnostics_paths = [(0, attrib) for attrib in diagnostics_contents]
-            swdiag_info = await dev_ctrl.ReadAttribute(self.dut_node_id, diagnostics_paths)
-
-            # Make sure everything came back from the read that we expected
-            asserts.assert_true(0 in swdiag_info.keys(), "Must have read endpoint 0 data")
-            asserts.assert_true(Clusters.SoftwareDiagnostics in swdiag_info[0].keys(
-            ), "Must have read Software Diagnostics cluster data")
-            for attribute in diagnostics_contents:
-                asserts.assert_true(attribute in swdiag_info[0][Clusters.SoftwareDiagnostics],
-                                    "Must have read back attribute %s" % (attribute.__name__))
-            high_watermark_before = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark]
-            current_usage_before = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed]
+            high_watermark_before, current_usage_before = await self.read_heap_usage(dev_ctrl)
 
         # Make sure all certificates are installed with maximal size
         dev_ctrl.fabricAdmin.certificateAuthority.maximizeCertChains = True
@@ -432,23 +416,7 @@ async def test_TC_RR_1_1(self):
         # Read heap watermarks after the test
         if check_heap_watermarks:
             logging.info("Read Heap info after stress test")
-
-            diagnostics_contents = [
-                Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark,
-                Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed,
-            ]
-            diagnostics_paths = [(0, attrib) for attrib in diagnostics_contents]
-            swdiag_info = await dev_ctrl.ReadAttribute(self.dut_node_id, diagnostics_paths)
-
-            # Make sure everything came back from the read that we expected
-            asserts.assert_true(0 in swdiag_info.keys(), "Must have read endpoint 0 data")
-            asserts.assert_true(Clusters.SoftwareDiagnostics in swdiag_info[0].keys(
-            ), "Must have read Software Diagnostics cluster data")
-            for attribute in diagnostics_contents:
-                asserts.assert_true(attribute in swdiag_info[0][Clusters.SoftwareDiagnostics],
-                                    "Must have read back attribute %s" % (attribute.__name__))
-            high_watermark_after = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark]
-            current_usage_after = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed]
+            high_watermark_after, current_usage_after = await self.read_heap_usage(dev_ctrl)
             logging.info("=== Heap Usage Diagnostics ===\nHigh watermark: {} (before) / {} (after)\n"
                          "Current usage: {} (before) / {} (after)".format(high_watermark_before, high_watermark_after,
                                                                           current_usage_before, current_usage_after))
@@ -666,7 +634,7 @@ def build_acl(self, fabric_number, client_by_name, num_controllers_per_fabric):
        #     - Subjects field: [0x3000_0000_0000_0001, 0x3000_0000_0000_0002, 0x3000_0000_0000_0003, 0x3000_0000_0000_0004]
        #     - Targets field: [{Cluster: 0xFFF1_FC40, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC41, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC02, DeviceType: 0xFFF1_FC42}]
        #   . struct
-       #     - Privilege field: View (3)
+       #     - Privilege field: View (1)
        #     - AuthMode field: CASE (2)
        #     - Subjects field: [0x4000_0000_0000_0001, 0x4000_0000_0000_0002, 0x4000_0000_0000_0003, 0x4000_0000_0000_0004]
        #     - Targets field: [{Cluster: 0xFFF1_FC80, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC81, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC82, DeviceType: 0xFFF1_FC22}]
@@ -748,6 +716,25 @@ def build_group_key(self, fabric_index: int, group_key_index: int, keys_per_fabr
                                                             epochKey2=self.random_string(16).encode(),
                                                             epochStartTime2=(set_id * 4 + 2))
 
+    async def read_heap_usage(self, dev_ctrl):
+        diagnostics_contents = [
+            Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark,
+            Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed,
+        ]
+        diagnostics_paths = [(0, attrib) for attrib in diagnostics_contents]
+        swdiag_info = await dev_ctrl.ReadAttribute(self.dut_node_id, diagnostics_paths)
+
+        # Make sure everything came back from the read that we expected
+        asserts.assert_true(0 in swdiag_info.keys(), "Must have read endpoint 0 data")
+        asserts.assert_true(Clusters.SoftwareDiagnostics in swdiag_info[0].keys(
+        ), "Must have read Software Diagnostics cluster data")
+        for attribute in diagnostics_contents:
+            asserts.assert_true(attribute in swdiag_info[0][Clusters.SoftwareDiagnostics],
+                                "Must have read back attribute %s" % (attribute.__name__))
+        high_watermark = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapHighWatermark]
+        current_usage = swdiag_info[0][Clusters.SoftwareDiagnostics][Clusters.SoftwareDiagnostics.Attributes.CurrentHeapUsed]
+        return high_watermark, current_usage
+
 
 if __name__ == "__main__":
     default_matter_test_main(maximize_cert_chains=True, controller_cat_tags=[0x0001_0001])
