diff --git a/examples/chip-tool/commands/common/CHIPCommand.cpp b/examples/chip-tool/commands/common/CHIPCommand.cpp index 663ebee8257d61..89dd1d536e4550 100644 --- a/examples/chip-tool/commands/common/CHIPCommand.cpp +++ b/examples/chip-tool/commands/common/CHIPCommand.cpp @@ -358,6 +358,12 @@ CHIP_ERROR CHIPCommand::InitializeCommissioner(std::string key, chip::FabricId f // store the credentials in persistent storage, and // generate when not available in the storage. ReturnLogErrorOnFailure(mCommissionerStorage.Init(key.c_str())); + if (mUseMaxSizedCerts.HasValue()) + { + auto option = CredentialIssuerCommands::CredentialIssuerOptions::kMaximizeCertificateSizes; + mCredIssuerCmds->SetCredentialIssuerOption(option, mUseMaxSizedCerts.Value()); + } + ReturnLogErrorOnFailure(mCredIssuerCmds->InitializeCredentialsIssuer(mCommissionerStorage)); chip::MutableByteSpan nocSpan(noc.Get(), chip::Controller::kMaxCHIPDERCertLength); diff --git a/examples/chip-tool/commands/common/CHIPCommand.h b/examples/chip-tool/commands/common/CHIPCommand.h index 1714928ea1096c..7dd36a7c7d6214 100644 --- a/examples/chip-tool/commands/common/CHIPCommand.h +++ b/examples/chip-tool/commands/common/CHIPCommand.h @@ -70,6 +70,9 @@ class CHIPCommand : public Command "4. The default if not specified is \"alpha\"."); AddArgument("commissioner-nodeid", 0, UINT64_MAX, &mCommissionerNodeId, "The node id to use for chip-tool. If not provided, kTestControllerNodeId (112233, 0x1B669) will be used."); + AddArgument("use-max-sized-certs", 0, 1, &mUseMaxSizedCerts, + "Maximize the size of operational certificates. If not provided or 0 (\"false\"), normally sized operational " + "certificates are generated."); #if CHIP_CONFIG_TRANSPORT_TRACE_ENABLED AddArgument("trace_file", &mTraceFile); AddArgument("trace_log", 0, 1, &mTraceLog); @@ -153,6 +156,7 @@ class CHIPCommand : public Command chip::Optional<chip::NodeId> mCommissionerNodeId; chip::Optional<uint16_t> mBleAdapterId; chip::Optional<char *> mPaaTrustStorePath; + chip::Optional<bool> mUseMaxSizedCerts; // Cached trust store so commands other than the original startup command // can spin up commissioners as needed.
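With the chip-tool change above, the new option is driven from the command line. A hypothetical invocation (the pairing subcommand and its arguments are illustrative; only the --use-max-sized-certs flag comes from this diff):

    chip-tool pairing onnetwork 1 20202021 --use-max-sized-certs 1

When the flag is set, InitializeCommissioner() forwards kMaximizeCertificateSizes to the credential issuer before InitializeCredentialsIssuer() runs, so the first chain the issuer generates is already maximized.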
diff --git a/examples/chip-tool/commands/common/CredentialIssuerCommands.h b/examples/chip-tool/commands/common/CredentialIssuerCommands.h index 951ef86efceb40..cc04863ee2f8b6 100644 --- a/examples/chip-tool/commands/common/CredentialIssuerCommands.h +++ b/examples/chip-tool/commands/common/CredentialIssuerCommands.h @@ -74,4 +74,23 @@ class CredentialIssuerCommands virtual CHIP_ERROR GenerateControllerNOCChain(chip::NodeId nodeId, chip::FabricId fabricId, const chip::CATValues & cats, chip::Crypto::P256Keypair & keypair, chip::MutableByteSpan & rcac, chip::MutableByteSpan & icac, chip::MutableByteSpan & noc) = 0; + + // All options must start false + enum CredentialIssuerOptions : uint8_t + { + kMaximizeCertificateSizes = 0, // If set, certificate chains will be maximized for testing via padding + }; + + virtual void SetCredentialIssuerOption(CredentialIssuerOptions option, bool isEnabled) + { + // Do nothing + (void) option; + (void) isEnabled; + } + + virtual bool GetCredentialIssuerOption(CredentialIssuerOptions option) + { + // All options always start false + return false; + } }; diff --git a/examples/chip-tool/commands/example/ExampleCredentialIssuerCommands.h b/examples/chip-tool/commands/example/ExampleCredentialIssuerCommands.h index 74646c8b5f10ba..40a2871b19437b 100644 --- a/examples/chip-tool/commands/example/ExampleCredentialIssuerCommands.h +++ b/examples/chip-tool/commands/example/ExampleCredentialIssuerCommands.h @@ -49,6 +49,33 @@ class ExampleCredentialIssuerCommands : public CredentialIssuerCommands return mOpCredsIssuer.GenerateNOCChainAfterValidation(nodeId, fabricId, cats, keypair.Pubkey(), rcac, icac, noc); } + void SetCredentialIssuerOption(CredentialIssuerOptions option, bool isEnabled) override + { + switch (option) + { + case CredentialIssuerOptions::kMaximizeCertificateSizes: + mUsesMaxSizedCerts = isEnabled; + mOpCredsIssuer.SetMaximallyLargeCertsUsed(mUsesMaxSizedCerts); + break; + default: + break; + } + } + + bool GetCredentialIssuerOption(CredentialIssuerOptions option) override + { + switch (option) + { + case CredentialIssuerOptions::kMaximizeCertificateSizes: + return mUsesMaxSizedCerts; + default: + return false; + } + } + +protected: + bool mUsesMaxSizedCerts = false; + private: chip::Controller::ExampleOperationalCredentialsIssuer mOpCredsIssuer; }; diff --git a/src/controller/ExampleOperationalCredentialsIssuer.cpp b/src/controller/ExampleOperationalCredentialsIssuer.cpp index cc8cd8fbcdb404..9c0b376b13716c 100644 --- a/src/controller/ExampleOperationalCredentialsIssuer.cpp +++ b/src/controller/ExampleOperationalCredentialsIssuer.cpp @@ -39,6 +39,127 @@ using namespace Credentials; using namespace Crypto; using namespace TLV; +namespace { + +enum CertType : uint8_t +{ + kRcac = 0, + kIcac = 1, + kNoc = 2 +}; + +CHIP_ERROR IssueX509Cert(uint32_t now, uint32_t validity, ChipDN issuerDn, ChipDN desiredDn, CertType certType, bool maximizeSize, + const Crypto::P256PublicKey & subjectPublicKey, Crypto::P256Keypair & issuerKeypair, + MutableByteSpan & outX509Cert) +{ + constexpr size_t kDERCertDnEncodingOverhead = 11; + constexpr size_t kTLVCertDnEncodingOverhead = 3; + constexpr size_t kMaxCertPaddingLength = 150; + constexpr size_t kTLVDesiredSize = kMaxCHIPCertLength - 50; + + Platform::ScopedMemoryBuffer<uint8_t> derBuf; + ReturnErrorCodeIf(!derBuf.Alloc(kMaxDERCertLength), CHIP_ERROR_NO_MEMORY); + MutableByteSpan derSpan{ derBuf.Get(), kMaxDERCertLength }; + + int64_t serialNumber = 1; + + switch (certType) + { + case CertType::kRcac: { + X509CertRequestParams
rcacRequest = { serialNumber, now, now + validity, desiredDn, desiredDn }; + ReturnErrorOnFailure(NewRootX509Cert(rcacRequest, issuerKeypair, derSpan)); + break; + } + case CertType::kIcac: { + X509CertRequestParams icacRequest = { serialNumber, now, now + validity, desiredDn, issuerDn }; + ReturnErrorOnFailure(NewICAX509Cert(icacRequest, subjectPublicKey, issuerKeypair, derSpan)); + break; + } + case CertType::kNoc: { + X509CertRequestParams nocRequest = { serialNumber, now, now + validity, desiredDn, issuerDn }; + ReturnErrorOnFailure(NewNodeOperationalX509Cert(nocRequest, subjectPublicKey, issuerKeypair, derSpan)); + break; + } + default: + return CHIP_ERROR_INVALID_ARGUMENT; + } + + if (maximizeSize && (desiredDn.RDNCount() < CHIP_CONFIG_CERT_MAX_RDN_ATTRIBUTES)) + { + Platform::ScopedMemoryBuffer<uint8_t> paddedTlvBuf; + ReturnErrorCodeIf(!paddedTlvBuf.Alloc(kMaxCHIPCertLength + kMaxCertPaddingLength), CHIP_ERROR_NO_MEMORY); + MutableByteSpan paddedTlvSpan{ paddedTlvBuf.Get(), kMaxCHIPCertLength + kMaxCertPaddingLength }; + ReturnErrorOnFailure(ConvertX509CertToChipCert(derSpan, paddedTlvSpan)); + + Platform::ScopedMemoryBuffer<uint8_t> paddedDerBuf; + ReturnErrorCodeIf(!paddedDerBuf.Alloc(kMaxDERCertLength + kMaxCertPaddingLength), CHIP_ERROR_NO_MEMORY); + MutableByteSpan paddedDerSpan{ paddedDerBuf.Get(), kMaxDERCertLength + kMaxCertPaddingLength }; + + Platform::ScopedMemoryBuffer<char> fillerBuf; + ReturnErrorCodeIf(!fillerBuf.Alloc(kMaxCertPaddingLength), CHIP_ERROR_NO_MEMORY); + memset(fillerBuf.Get(), 'A', kMaxCertPaddingLength); + + int derPaddingLen = static_cast<int>(kMaxDERCertLength - kDERCertDnEncodingOverhead - derSpan.size()); + int tlvPaddingLen = static_cast<int>(kTLVDesiredSize - kTLVCertDnEncodingOverhead - paddedTlvSpan.size()); + if (certType == CertType::kRcac) + { + // For RCAC the issuer/subject DN are the same so padding will be present in both + derPaddingLen = (derPaddingLen - static_cast<int>(kDERCertDnEncodingOverhead)) / 2; + tlvPaddingLen = (tlvPaddingLen - static_cast<int>(kTLVCertDnEncodingOverhead)) / 2; + } + + size_t paddingLen = 0; + if (derPaddingLen >= 1 && tlvPaddingLen >= 1) + { + paddingLen = std::min(static_cast<size_t>(std::min(derPaddingLen, tlvPaddingLen)), kMaxCertPaddingLength); + } + + for (; paddingLen > 0; paddingLen--) + { + paddedDerSpan = MutableByteSpan{ paddedDerBuf.Get(), kMaxDERCertLength + kMaxCertPaddingLength }; + paddedTlvSpan = MutableByteSpan{ paddedTlvBuf.Get(), kMaxCHIPCertLength + kMaxCertPaddingLength }; + + ChipDN certDn = desiredDn; + // Fill the padding in the DomainNameQualifier DN + certDn.AddAttribute_DNQualifier(CharSpan(fillerBuf.Get(), paddingLen), false); + + switch (certType) + { + case CertType::kRcac: { + X509CertRequestParams rcacRequest = { serialNumber, now, now + validity, certDn, certDn }; + ReturnErrorOnFailure(NewRootX509Cert(rcacRequest, issuerKeypair, paddedDerSpan)); + break; + } + case CertType::kIcac: { + X509CertRequestParams icacRequest = { serialNumber, now, now + validity, certDn, issuerDn }; + ReturnErrorOnFailure(NewICAX509Cert(icacRequest, subjectPublicKey, issuerKeypair, paddedDerSpan)); + break; + } + case CertType::kNoc: { + X509CertRequestParams nocRequest = { serialNumber, now, now + validity, certDn, issuerDn }; + ReturnErrorOnFailure(NewNodeOperationalX509Cert(nocRequest, subjectPublicKey, issuerKeypair, paddedDerSpan)); + break; + } + default: + return CHIP_ERROR_INVALID_ARGUMENT; + } + + ReturnErrorOnFailure(ConvertX509CertToChipCert(paddedDerSpan, paddedTlvSpan)); + + ChipLogProgress(Controller, "Generated maximized certificate
with %u DER bytes, %u TLV bytes", + static_cast<unsigned>(paddedDerSpan.size()), static_cast<unsigned>(paddedTlvSpan.size())); + if (paddedDerSpan.size() <= kMaxDERCertLength && paddedTlvSpan.size() <= kMaxCHIPCertLength) + { + return CopySpanToMutableSpan(paddedDerSpan, outX509Cert); + } + } + } + + return CopySpanToMutableSpan(derSpan, outX509Cert); +} + +} // namespace + CHIP_ERROR ExampleOperationalCredentialsIssuer::Initialize(PersistentStorageDelegate & storage) { using namespace ASN1; @@ -122,6 +243,12 @@ CHIP_ERROR ExampleOperationalCredentialsIssuer::GenerateNOCChainAfterValidation( uint16_t rcacBufLen = static_cast<uint16_t>(std::min(rcac.size(), static_cast<size_t>(UINT16_MAX))); PERSISTENT_KEY_OP(mIndex, kOperationalCredentialsRootCertificateStorage, key, err = mStorage->SyncGetKeyValue(key, rcac.data(), rcacBufLen)); + // Always regenerate RCAC on maximally sized certs. The keys remain the same, so everything is fine. + if (mUseMaximallySizedCerts) + { + err = CHIP_ERROR_PERSISTED_STORAGE_VALUE_NOT_FOUND; + } + if (err == CHIP_NO_ERROR) { uint64_t rcacId; @@ -137,10 +264,14 @@ CHIP_ERROR ExampleOperationalCredentialsIssuer::GenerateNOCChainAfterValidation( ReturnErrorOnFailure(rcac_dn.AddAttribute_MatterRCACId(mIssuerId)); ChipLogProgress(Controller, "Generating RCAC"); - X509CertRequestParams rcac_request = { 0, mNow, mNow + mValidity, rcac_dn, rcac_dn }; - ReturnErrorOnFailure(NewRootX509Cert(rcac_request, mIssuer, rcac)); - + ReturnErrorOnFailure(IssueX509Cert(mNow, mValidity, rcac_dn, rcac_dn, CertType::kRcac, mUseMaximallySizedCerts, + mIssuer.Pubkey(), mIssuer, rcac)); VerifyOrReturnError(CanCastTo<uint16_t>(rcac.size()), CHIP_ERROR_INTERNAL); + + // Re-extract DN based on final generated cert + rcac_dn = ChipDN{}; + ReturnErrorOnFailure(ExtractSubjectDNFromX509Cert(rcac, rcac_dn)); + PERSISTENT_KEY_OP(mIndex, kOperationalCredentialsRootCertificateStorage, key, ReturnErrorOnFailure(mStorage->SyncSetKeyValue(key, rcac.data(), static_cast<uint16_t>(rcac.size())))); } @@ -149,6 +280,11 @@ CHIP_ERROR ExampleOperationalCredentialsIssuer::GenerateNOCChainAfterValidation( uint16_t icacBufLen = static_cast<uint16_t>(std::min(icac.size(), static_cast<size_t>(UINT16_MAX))); PERSISTENT_KEY_OP(mIndex, kOperationalCredentialsIntermediateCertificateStorage, key, err = mStorage->SyncGetKeyValue(key, icac.data(), icacBufLen)); + // Always regenerate ICAC on maximally sized certs. The keys remain the same, so everything is fine.
+ if (mUseMaximallySizedCerts) + { + err = CHIP_ERROR_PERSISTED_STORAGE_VALUE_NOT_FOUND; + } if (err == CHIP_NO_ERROR) { uint64_t icacId; @@ -164,10 +300,14 @@ CHIP_ERROR ExampleOperationalCredentialsIssuer::GenerateNOCChainAfterValidation( ReturnErrorOnFailure(icac_dn.AddAttribute_MatterICACId(mIntermediateIssuerId)); ChipLogProgress(Controller, "Generating ICAC"); - X509CertRequestParams icac_request = { 0, mNow, mNow + mValidity, icac_dn, rcac_dn }; - ReturnErrorOnFailure(NewICAX509Cert(icac_request, mIntermediateIssuer.Pubkey(), mIssuer, icac)); - + ReturnErrorOnFailure(IssueX509Cert(mNow, mValidity, rcac_dn, icac_dn, CertType::kIcac, mUseMaximallySizedCerts, + mIntermediateIssuer.Pubkey(), mIssuer, icac)); VerifyOrReturnError(CanCastTo<uint16_t>(icac.size()), CHIP_ERROR_INTERNAL); + + // Re-extract DN based on final generated cert + icac_dn = ChipDN{}; + ReturnErrorOnFailure(ExtractSubjectDNFromX509Cert(icac, icac_dn)); + PERSISTENT_KEY_OP(mIndex, kOperationalCredentialsIntermediateCertificateStorage, key, ReturnErrorOnFailure(mStorage->SyncSetKeyValue(key, icac.data(), static_cast<uint16_t>(icac.size())))); } @@ -178,8 +318,8 @@ CHIP_ERROR ExampleOperationalCredentialsIssuer::GenerateNOCChainAfterValidation( ReturnErrorOnFailure(noc_dn.AddCATs(cats)); ChipLogProgress(Controller, "Generating NOC"); - X509CertRequestParams noc_request = { 1, mNow, mNow + mValidity, noc_dn, icac_dn }; - return NewNodeOperationalX509Cert(noc_request, pubkey, mIntermediateIssuer, noc); + return IssueX509Cert(mNow, mValidity, icac_dn, noc_dn, CertType::kNoc, mUseMaximallySizedCerts, pubkey, mIntermediateIssuer, + noc); } CHIP_ERROR ExampleOperationalCredentialsIssuer::GenerateNOCChain(const ByteSpan & csrElements, const ByteSpan & csrNonce, @@ -227,16 +367,16 @@ CHIP_ERROR ExampleOperationalCredentialsIssuer::GenerateNOCChain(const ByteSpan ReturnErrorOnFailure(VerifyCertificateSigningRequest(csr.data(), csr.size(), pubkey)); chip::Platform::ScopedMemoryBuffer<uint8_t> noc; - ReturnErrorCodeIf(!noc.Alloc(kMaxCHIPDERCertLength), CHIP_ERROR_NO_MEMORY); - MutableByteSpan nocSpan(noc.Get(), kMaxCHIPDERCertLength); + ReturnErrorCodeIf(!noc.Alloc(kMaxDERCertLength), CHIP_ERROR_NO_MEMORY); + MutableByteSpan nocSpan(noc.Get(), kMaxDERCertLength); chip::Platform::ScopedMemoryBuffer<uint8_t> icac; - ReturnErrorCodeIf(!icac.Alloc(kMaxCHIPDERCertLength), CHIP_ERROR_NO_MEMORY); - MutableByteSpan icacSpan(icac.Get(), kMaxCHIPDERCertLength); + ReturnErrorCodeIf(!icac.Alloc(kMaxDERCertLength), CHIP_ERROR_NO_MEMORY); + MutableByteSpan icacSpan(icac.Get(), kMaxDERCertLength); chip::Platform::ScopedMemoryBuffer<uint8_t> rcac; - ReturnErrorCodeIf(!rcac.Alloc(kMaxCHIPDERCertLength), CHIP_ERROR_NO_MEMORY); - MutableByteSpan rcacSpan(rcac.Get(), kMaxCHIPDERCertLength); + ReturnErrorCodeIf(!rcac.Alloc(kMaxDERCertLength), CHIP_ERROR_NO_MEMORY); + MutableByteSpan rcacSpan(rcac.Get(), kMaxDERCertLength); ReturnErrorOnFailure( GenerateNOCChainAfterValidation(assignedId, mNextFabricId, chip::kUndefinedCATs, pubkey, rcacSpan, icacSpan, nocSpan));
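The maximization strategy in IssueX509Cert above is easier to read in isolation. A Python sketch of the same control flow, for illustration only (generate_der stands in for the NewRootX509Cert/NewICAX509Cert/NewNodeOperationalX509Cert calls with an n-byte DN-qualifier pad, and to_tlv for ConvertX509CertToChipCert; the DN-encoding-overhead corrections and the RCAC halving are omitted for brevity):

    # Sketch of the padding search: start from an optimistic estimate and walk
    # down until both the DER and TLV encodings fit their limits.
    def issue_maximized(generate_der, to_tlv, k_max_der, k_max_tlv, k_max_pad=150):
        baseline = generate_der(0)  # certificate with no padding attribute
        pad = min(k_max_der - len(baseline), k_max_tlv - len(to_tlv(baseline)), k_max_pad)
        while pad > 0:
            candidate = generate_der(pad)  # re-issue with an 'AAA...' DNQualifier of length pad
            if len(candidate) <= k_max_der and len(to_tlv(candidate)) <= k_max_tlv:
                return candidate  # largest cert that still fits both encodings
            pad -= 1  # overshot: shrink the pad and retry
        return baseline  # no pad fits; fall back to the normally sized cert

Walking down one byte at a time guarantees finding the largest workable pad, since the TLV re-encoding does not shrink in lockstep with the DER size.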
diff --git a/src/controller/ExampleOperationalCredentialsIssuer.h b/src/controller/ExampleOperationalCredentialsIssuer.h index a85684cf6957e2..6e3b1e554d4288 100644 --- a/src/controller/ExampleOperationalCredentialsIssuer.h +++ b/src/controller/ExampleOperationalCredentialsIssuer.h @@ -65,6 +65,8 @@ class DLL_EXPORT ExampleOperationalCredentialsIssuer : public OperationalCredent mNodeIdRequested = true; } + void SetMaximallyLargeCertsUsed(bool areMaximallyLargeCertsUsed) { mUseMaximallySizedCerts = areMaximallyLargeCertsUsed; } + void SetFabricIdForNextNOCRequest(FabricId fabricId) override { mNextFabricId = fabricId; } /** @@ -108,8 +110,8 @@ class DLL_EXPORT ExampleOperationalCredentialsIssuer : public OperationalCredent Crypto::P256Keypair mIssuer; Crypto::P256Keypair mIntermediateIssuer; bool mInitialized = false; - uint32_t mIssuerId = 0; - uint32_t mIntermediateIssuerId = 1; + uint32_t mIssuerId = 1; + uint32_t mIntermediateIssuerId = 2; uint32_t mNow = 0; // By default, let's set validity to 10 years @@ -117,6 +119,7 @@ class DLL_EXPORT ExampleOperationalCredentialsIssuer : public OperationalCredent NodeId mNextAvailableNodeId = 1; PersistentStorageDelegate * mStorage = nullptr; + bool mUseMaximallySizedCerts = false; NodeId mNextRequestedNodeId = 1; FabricId mNextFabricId = 1; diff --git a/src/controller/python/OpCredsBinding.cpp b/src/controller/python/OpCredsBinding.cpp index e6ed27a6da1db3..cb714ae7385fc3 100644 --- a/src/controller/python/OpCredsBinding.cpp +++ b/src/controller/python/OpCredsBinding.cpp @@ -77,6 +77,8 @@ class OperationalCredentialsAdapter : public OperationalCredentialsDelegate return mExampleOpCredsIssuer.GenerateNOCChainAfterValidation(nodeId, fabricId, cats, pubKey, rcac, icac, noc); } + void SetMaximallyLargeCertsUsed(bool enabled) { mExampleOpCredsIssuer.SetMaximallyLargeCertsUsed(enabled); } + private: CHIP_ERROR GenerateNOCChain(const ByteSpan & csrElements, const ByteSpan & csrNonce, const ByteSpan & attestationSignature, const ByteSpan & attestationChallenge, const ByteSpan & DAC, const ByteSpan & PAI, @@ -360,9 +362,10 @@ ChipError::StorageType pychip_OpCreds_AllocateController(OpCredsContext * contex CATValues catValues; - if ((caseAuthTagLen + 1) > kMaxSubjectCATAttributeCount) + if (caseAuthTagLen > kMaxSubjectCATAttributeCount) { - ChipLogError(Controller, "# of CASE Tags exceeds kMaxSubjectCATAttributeCount"); + ChipLogError(Controller, "Too many CASE Tags (%u), exceeds kMaxSubjectCATAttributeCount", + static_cast<unsigned>(caseAuthTagLen)); return CHIP_ERROR_INVALID_ARGUMENT.AsInteger(); } @@ -414,6 +417,15 @@ ChipError::StorageType pychip_OpCreds_AllocateController(OpCredsContext * contex return CHIP_NO_ERROR.AsInteger(); } +ChipError::StorageType pychip_OpCreds_SetMaximallyLargeCertsUsed(OpCredsContext * context, bool enabled) +{ + VerifyOrReturnError(context != nullptr && context->mAdapter != nullptr, CHIP_ERROR_INCORRECT_STATE.AsInteger()); + + context->mAdapter->SetMaximallyLargeCertsUsed(enabled); + + return CHIP_NO_ERROR.AsInteger(); +} + void pychip_OpCreds_FreeDelegate(OpCredsContext * context) { Platform::Delete(context); diff --git a/src/controller/python/chip/CertificateAuthority.py b/src/controller/python/chip/CertificateAuthority.py index aa9011fb60a250..7f40f0cd016100 100644 --- a/src/controller/python/chip/CertificateAuthority.py +++ b/src/controller/python/chip/CertificateAuthority.py @@ -45,7 +45,7 @@ class CertificateAuthority: Each CertificateAuthority instance is associated with a single instance of the OperationalCredentialsAdapter. This adapter instance implements the OperationalCredentialsDelegate and is meant to provide a Python adapter to the functions in that delegate. It relies on the in-built - ExampleOperationalCredentialsIssuer to then generate certificate material for the CA. This instance also uses the 'CA index' to + ExampleOperationalCredentialsIssuer to then generate certificate material for the CA. This instance also uses the 'CA index' to store/look-up the associated credential material from the provided PersistentStorage object.
''' @classmethod @@ -74,10 +74,14 @@ def __init__(self, chipStack: ChipStack.ChipStack, caIndex: int, persistentStora self._Handle().pychip_OpCreds_InitializeDelegate.restype = c_void_p self._Handle().pychip_OpCreds_InitializeDelegate.argtypes = [ctypes.py_object, ctypes.c_uint32, ctypes.c_void_p] + self._Handle().pychip_OpCreds_SetMaximallyLargeCertsUsed.restype = c_uint32 + self._Handle().pychip_OpCreds_SetMaximallyLargeCertsUsed.argtypes = [ctypes.c_void_p, ctypes.c_bool] + if (persistentStorage is None): persistentStorage = self._chipStack.GetStorageManager() self._persistentStorage = persistentStorage + self._maximizeCertChains = False self._closure = self._chipStack.Call( lambda: self._Handle().pychip_OpCreds_InitializeDelegate( @@ -181,6 +185,21 @@ def caIndex(self) -> int: def adminList(self) -> list[FabricAdmin.FabricAdmin]: return self._activeAdmins + @property + def maximizeCertChains(self) -> bool: + return self._maximizeCertChains + + @maximizeCertChains.setter + def maximizeCertChains(self, enabled: bool): + res = self._chipStack.Call( + lambda: self._Handle().pychip_OpCreds_SetMaximallyLargeCertsUsed(ctypes.c_void_p(self._closure), ctypes.c_bool(enabled)) + ) + + if res != 0: + raise self._chipStack.ErrorToException(res) + + self._maximizeCertChains = enabled + def __del__(self): self.Shutdown() @@ -243,7 +262,7 @@ def LoadAuthoritiesFromStorage(self): ca = self.NewCertificateAuthority(int(caIndex)) ca.LoadFabricAdminsFromStorage() - def NewCertificateAuthority(self, caIndex: int = None): + def NewCertificateAuthority(self, caIndex: int = None, maximizeCertChains: bool = False): ''' Creates a new CertificateAuthority instance with the provided CA Index and the PersistentStorage instance previously setup in the constructor. @@ -268,6 +287,7 @@ def NewCertificateAuthority(self, caIndex: int = None): self._persistentStorage.SetReplKey(key='caList', value=caList) ca = CertificateAuthority(chipStack=self._chipStack, caIndex=caIndex, persistentStorage=self._persistentStorage) + ca.maximizeCertChains = maximizeCertChains self._activeCaList.append(ca) return ca diff --git a/src/controller/python/chip/FabricAdmin.py b/src/controller/python/chip/FabricAdmin.py index 3cfe03ffdb7e95..97a729035f811e 100644 --- a/src/controller/python/chip/FabricAdmin.py +++ b/src/controller/python/chip/FabricAdmin.py @@ -102,7 +102,7 @@ def NewController(self, nodeId: int = None, paaTrustStorePath: str = "", useTest raise RuntimeError(f"Provided NodeId {nodeId} collides with an existing controller instance!") self.logger().warning( - f"Allocating new controller with CaIndex: {self._certificateAuthority.caIndex}, FabricId: 0x{self._fabricId:016X}, NodeId: 0x{nodeId:016X}") + f"Allocating new controller with CaIndex: {self._certificateAuthority.caIndex}, FabricId: 0x{self._fabricId:016X}, NodeId: 0x{nodeId:016X}, CatTags: {catTags}") controller = ChipDeviceCtrl.ChipDeviceController(opCredsContext=self._certificateAuthority.GetOpCredsContext(), fabricId=self._fabricId, nodeId=nodeId, adminVendorId=self._vendorId, paaTrustStorePath=paaTrustStorePath, useTestCommissioner=useTestCommissioner, fabricAdmin=self, catTags=catTags) diff --git a/src/controller/python/chip/utils/CommissioningBuildingBlocks.py b/src/controller/python/chip/utils/CommissioningBuildingBlocks.py index ae4da4a4ee1fa8..20dbcd6441a746 100644 --- a/src/controller/python/chip/utils/CommissioningBuildingBlocks.py +++ b/src/controller/python/chip/utils/CommissioningBuildingBlocks.py @@ -30,7 +30,7 @@ _UINT16_MAX = 65535 -logger = 
logging.getLogger() +logger = logging.getLogger('CommissioningBuildingBlocks') async def _IsNodeInFabricList(devCtrl, nodeId): @@ -43,7 +43,7 @@ async def _IsNodeInFabricList(devCtrl, nodeId): return False -async def GrantPrivilege(adminCtrl: ChipDeviceController, grantedCtrl: ChipDeviceController, privilege: Clusters.AccessControl.Enums.Privilege, targetNodeId: int): +async def GrantPrivilege(adminCtrl: ChipDeviceController, grantedCtrl: ChipDeviceController, privilege: Clusters.AccessControl.Enums.Privilege, targetNodeId: int, targetCatTags: typing.List[int] = []): ''' Given an existing controller with admin privileges over a target node, grants the specified privilege to the new ChipDeviceController instance to the entire Node. This is achieved by updating the ACL entries on the target. @@ -53,20 +53,29 @@ async def GrantPrivilege(adminCtrl: ChipDeviceController, grantedCtrl: ChipDevic Args: adminCtrl: ChipDeviceController instance with admin privileges over the target node grantedCtrl: ChipDeviceController instance that is being granted the new privilege. - privilege: Privilege to grant to the granted controller + privilege: Privilege to grant to the granted controller. If None, no privilege is granted. targetNodeId: Target node to which the controller is granted privilege. + targetCatTags: Target list of 32-bit CAT tags that is granted privilege. If provided, these will be used in the subject list instead of the node ID of grantedCtrl. ''' - data = await adminCtrl.ReadAttribute(targetNodeId, [(Clusters.AccessControl.Attributes.Acl)]) if 0 not in data: raise ValueError("Did not get back any data (possible cause: controller has no access).") currentAcls = data[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl] + if len(targetCatTags) != 0: + # Convert to an ACL subject format in CAT range + targetSubjects = [tag | 0xFFFF_FFFD_0000_0000 for tag in targetCatTags] + else: + targetSubjects = [grantedCtrl.nodeId] + + if (len(targetSubjects) > 4): + raise ValueError(f"List of target subjects of len {len(targetSubjects)} exceeded the minima of 4!") + # Step 1: Wipe the subject from all existing ACLs. for acl in currentAcls: if (acl.subjects != NullValue): - acl.subjects = [subject for subject in acl.subjects if subject != grantedCtrl.nodeId] + acl.subjects = [subject for subject in acl.subjects if subject not in targetSubjects] if (privilege): addedPrivilege = False @@ -75,9 +84,11 @@ async def GrantPrivilege(adminCtrl: ChipDeviceController, grantedCtrl: ChipDevic # the existing privilege in that entry matches our desired privilege. for acl in currentAcls: if acl.privilege == privilege: - if grantedCtrl.nodeId not in acl.subjects: - acl.subjects.append(grantedCtrl.nodeId) + subjectSet = set(acl.subjects) + subjectSet.update(targetSubjects) + acl.subjects = list(subjectSet) addedPrivilege = True + break # Step 3: If there isn't an existing entry to add to, make a new one. if (not(addedPrivilege)): @@ -86,14 +97,16 @@ async def GrantPrivilege(adminCtrl: ChipDeviceController, grantedCtrl: ChipDevic f"Cannot add another ACL entry to grant privilege to existing count of {currentAcls} ACLs -- will exceed minimas!") currentAcls.append(Clusters.AccessControl.Structs.AccessControlEntry(privilege=privilege, authMode=Clusters.AccessControl.Enums.AuthMode.kCase, - subjects=[grantedCtrl.nodeId])) + subjects=targetSubjects)) # Step 4: Prune ACLs which have empty subjects.
currentAcls = [acl for acl in currentAcls if acl.subjects != NullValue and len(acl.subjects) != 0] + + logger.info(f'GrantPrivilege: Writing acls: {currentAcls}') await adminCtrl.WriteAttribute(targetNodeId, [(0, Clusters.AccessControl.Attributes.Acl(currentAcls))]) -async def CreateControllersOnFabric(fabricAdmin: FabricAdmin, adminDevCtrl: ChipDeviceController, controllerNodeIds: typing.List[int], privilege: Clusters.AccessControl.Enums.Privilege, targetNodeId: int) -> typing.List[ChipDeviceController]: +async def CreateControllersOnFabric(fabricAdmin: FabricAdmin, adminDevCtrl: ChipDeviceController, controllerNodeIds: typing.List[int], privilege: Clusters.AccessControl.Enums.Privilege, targetNodeId: int, catTags: typing.List[int] = []) -> typing.List[ChipDeviceController]: ''' Create new ChipDeviceController instances on a given fabric with a specific privilege on a target node. Args: @@ -102,13 +115,14 @@ async def CreateControllersOnFabric(fabricAdmin: FabricAdmin, adminDevCtrl: Chip controllerNodeIds: List of desired nodeIds for the controllers. privilege: The specific ACL privilege to grant to the newly minted controllers. targetNodeId: The Node ID of the target. + catTags: CAT Tags to include in the NOC of the controller, as well as when setting up the ACLs on the target. ''' controllerList = [] for nodeId in controllerNodeIds: - newController = fabricAdmin.NewController(nodeId=nodeId) - await GrantPrivilege(adminDevCtrl, newController, privilege, targetNodeId) + newController = fabricAdmin.NewController(nodeId=nodeId, catTags=catTags) + await GrantPrivilege(adminDevCtrl, newController, privilege, targetNodeId, catTags) controllerList.append(newController) return controllerList diff --git a/src/controller/python/test/test_scripts/base.py index 8488b2e80b6ce6..8665c288276603 100644 --- a/src/controller/python/test/test_scripts/base.py +++ b/src/controller/python/test/test_scripts/base.py @@ -389,31 +389,28 @@ def TestFailsafe(self, nodeid: int): async def TestControllerCATValues(self, nodeid: int): ''' This tests controllers using CAT Values ''' - # Allocate a new controller instance with a CAT tag. - newController = self.fabricAdmin.NewController(nodeId=300, catTags=[0x00010001]) + newControllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=self.fabricAdmin, adminDevCtrl=self.devCtrl, controllerNodeIds=[300], targetNodeId=nodeid, privilege=None, catTags=[0x0001_0001]) # Read out an attribute using the new controller. It has no privileges, so this should fail with an UnsupportedAccess error. - res = await newController.ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) + res = await newControllers[0].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) if(res[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl].Reason.status != IM.Status.UnsupportedAccess): self.logger.error(f"1: Received data instead of an error:{res}") return False - # Do a read-modify-write operation on the ACL list to add the CAT tag to the ACL list.
- aclList = (await self.devCtrl.ReadAttribute(nodeid, [(0, Clusters.AccessControl.Attributes.Acl)]))[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl] - origAclList = copy.deepcopy(aclList) - aclList[0].subjects.append(0xFFFFFFFD00010001) - await self.devCtrl.WriteAttribute(nodeid, [(0, Clusters.AccessControl.Attributes.Acl(aclList))]) + # Grant the new controller privilege by adding the CAT tag to the subject. + await CommissioningBuildingBlocks.GrantPrivilege(adminCtrl=self.devCtrl, grantedCtrl=newControllers[0], privilege=Clusters.AccessControl.Enums.Privilege.kAdminister, targetNodeId=nodeid, targetCatTags=[0x0001_0001]) # Read out the attribute again - this time, it should succeed. - res = await newController.ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) + res = await newControllers[0].ReadAttribute(nodeid=nodeid, attributes=[(0, Clusters.AccessControl.Attributes.Acl)]) if (type(res[0][Clusters.AccessControl][Clusters.AccessControl.Attributes.Acl][0]) != Clusters.AccessControl.Structs.AccessControlEntry): self.logger.error(f"2: Received something other than data:{res}") return False - # Write back the old entry to reset ACL list back. - await self.devCtrl.WriteAttribute(nodeid, [(0, Clusters.AccessControl.Attributes.Acl(origAclList))]) - newController.Shutdown() + # Reset the privilege back to pre-test. + await CommissioningBuildingBlocks.GrantPrivilege(adminCtrl=self.devCtrl, grantedCtrl=newControllers[0], privilege=None, targetNodeId=nodeid) + + newControllers[0].Shutdown() return True diff --git a/src/python_testing/TC_RR_1_1.py b/src/python_testing/TC_RR_1_1.py new file mode 100644 index 00000000000000..00dd9a111b8308 --- /dev/null +++ b/src/python_testing/TC_RR_1_1.py @@ -0,0 +1,424 @@ +# +# Copyright (c) 2022 Project CHIP Authors +# All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +from matter_testing_support import MatterBaseTest, default_matter_test_main, async_test_body +import chip.clusters as Clusters +import chip.FabricAdmin +import chip.CertificateAuthority +import logging +from mobly import asserts +from chip.utils import CommissioningBuildingBlocks +from chip.clusters.Attribute import TypedAttributePath, SubscriptionTransaction, AttributeStatus +from chip.interaction_model import Status as StatusEnum +import queue +import asyncio +from binascii import hexlify +from threading import Event +import time +import random + +from TC_SC_3_6 import AttributeChangeAccumulator, ResubscriptionCatcher + +# TODO: Overall, we need to add validation that session IDs have not changed throughout, to be agnostic +# to some internal behavior assumptions of the SDK we are making relative to the write that +# triggers the subscription reports not re-opening a new CASE session +# + + +class TC_RR_1_1(MatterBaseTest): + def setup_class(self): + self._pseudo_random_generator = random.Random(1234) + self._subscriptions = [] + + def teardown_class(self): + logging.info("Teardown: shutting down all subscriptions to avoid racy callbacks") + for subscription in self._subscriptions: + subscription.Shutdown() + + @async_test_body + async def test_TC_RR_1_1(self): + dev_ctrl = self.default_controller + + # Debug/test arguments + + # Get overrides for debugging the test + num_fabrics_to_commission = self.user_params.get("num_fabrics_to_commission", 5) + num_controllers_per_fabric = self.user_params.get("num_controllers_per_fabric", 3) + # Immediate reporting + min_report_interval_sec = self.user_params.get("min_report_interval_sec", 0) + # 10 minutes max reporting interval --> We don't care about keep-alives per-se and + # want to avoid resubscriptions + max_report_interval_sec = self.user_params.get("max_report_interval_sec", 10 * 60) + # Time to wait after changing NodeLabel for subscriptions to all hit. This is dependent + # on MRP params of subscriber and on actual min_report_interval. + # TODO: Determine the correct max value depending on target. Test plan doesn't say! + timeout_delay_sec = self.user_params.get("timeout_delay_sec", max_report_interval_sec * 2) + # Whether to skip filling the UserLabel clusters + skip_user_label_cluster_steps = self.user_params.get("skip_user_label_cluster_steps", False) + + BEFORE_LABEL = "Before Subscriptions 12345678912" + AFTER_LABEL = "After Subscriptions 123456789123" + + # Pre-conditions + + # Make sure all certificates are installed with maximal size + dev_ctrl.fabricAdmin.certificateAuthority.maximizeCertChains = True + + # TODO: Do from PICS list.
The reflection approach here is what a real client would do, + # and it respects what the test says: "TH writes 4 entries per endpoint where LabelList is supported" + logging.info("Pre-condition: determine whether any endpoints have UserLabel cluster (ULABEL.S.A0000(LabelList))") + endpoints_with_user_label_list = await dev_ctrl.ReadAttribute(self.dut_node_id, [Clusters.UserLabel.Attributes.LabelList]) + has_user_labels = len(endpoints_with_user_label_list) > 0 + if has_user_labels: + logging.info("--> User label cluster present on endpoints %s" % + ", ".join(["%d" % ep for ep in endpoints_with_user_label_list.keys()])) + else: + logging.info("--> User label cluster not present on any endpoints") + + # Generate list of all client names + all_names = [] + for fabric_idx in range(num_fabrics_to_commission): + for controller_idx in range(num_controllers_per_fabric): + all_names.append("RD%d%s" % (fabric_idx + 1, chr(ord('A') + controller_idx))) + logging.info(f"Client names that will be used: {all_names}") + client_list = [] + + # TODO: Shall we also verify SupportedFabrics attribute, and the CapabilityMinima attribute? + logging.info("Pre-conditions: validate CapabilityMinima.CaseSessionsPerFabric >= 3") + + capability_minima = await self.read_single_attribute(dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.Basic.Attributes.CapabilityMinima) + asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric, 3) + + # Step 1: Commission 5 fabrics with maximized NOC chains + logging.info(f"Step 1: use existing fabric to configure new fabrics so that total is {num_fabrics_to_commission} fabrics") + + # Generate Node IDs for the subsequent controllers, starting at 200 and stepping by 100 (200, 300, ...) + node_ids = [200 + (i * 100) for i in range(num_controllers_per_fabric - 1)] + + # Prepare clients for the first fabric, which includes the default controller + dev_ctrl.name = all_names.pop(0) + client_list.append(dev_ctrl) + + if num_controllers_per_fabric > 1: + new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=dev_ctrl.fabricAdmin, adminDevCtrl=dev_ctrl, controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.Privilege.kAdminister, targetNodeId=self.dut_node_id, catTags=[0x0001_0001]) + for controller in new_controllers: + controller.name = all_names.pop(0) + client_list.extend(new_controllers) + + # Prepare clients for subsequent fabrics + for i in range(num_fabrics_to_commission - 1): + admin_index = 2 + i + logging.info("Commissioning fabric %d/%d" % (admin_index, num_fabrics_to_commission)) + new_certificate_authority = self.certificate_authority_manager.NewCertificateAuthority() + new_fabric_admin = new_certificate_authority.NewFabricAdmin(vendorId=0xFFF1, fabricId=admin_index) + + new_admin_ctrl = new_fabric_admin.NewController(nodeId=dev_ctrl.nodeId, catTags=[0x0001_0001]) + new_admin_ctrl.name = all_names.pop(0) + client_list.append(new_admin_ctrl) + await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(commissionerDevCtrl=dev_ctrl, newFabricDevCtrl=new_admin_ctrl, existingNodeId=self.dut_node_id, newNodeId=self.dut_node_id) + + if num_controllers_per_fabric > 1: + new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(fabricAdmin=new_fabric_admin, adminDevCtrl=new_admin_ctrl, + controllerNodeIds=node_ids, privilege=Clusters.AccessControl.Enums.Privilege.kAdminister, targetNodeId=self.dut_node_id, catTags=[0x0001_0001]) + for controller in new_controllers: + controller.name = all_names.pop(0) +
client_list.extend(new_controllers) + + asserts.assert_equal(len(client_list), num_fabrics_to_commission * + num_controllers_per_fabric, "Must have the right number of clients") + + client_by_name = {client.name: client for client in client_list} + + # Step 2: Set the Label field for each fabric and BasicInformation.NodeLabel to 32 characters + logging.info("Step 2: Setting the Label field for each fabric and BasicInformation.NodeLabel to 32 characters") + + for idx in range(num_fabrics_to_commission): + fabric_number = idx + 1 + # Client is client A for each fabric to set the Label field + client_name = "RD%dA" % fabric_number + client = client_by_name[client_name] + + # Send the UpdateLabel command + label = ("%d" % fabric_number) * 32 + logging.info("Step 2a: Setting fabric label on fabric %d to '%s' using client %s" % (fabric_number, label, client_name)) + await client.SendCommand(self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.UpdateFabricLabel(label)) + + # Read back + fabric_metadata = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.Fabrics) + print(fabric_metadata) + asserts.assert_equal(fabric_metadata[0].label, label, "Fabrics[x].label must match what was written") + + # Before subscribing, set the NodeLabel to "Before Subscriptions" + logging.info(f"Step 2b: Set BasicInformation.NodeLabel to {BEFORE_LABEL}") + await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.Basic.Attributes.NodeLabel(value=BEFORE_LABEL))]) + + node_label = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.Basic.Attributes.NodeLabel) + asserts.assert_equal(node_label, BEFORE_LABEL, "NodeLabel must match what was written") + + # Step 3: Add 3 Access Control entries on DUT with a list of 4 Subjects and 3 Targets with the following parameters (...) + logging.info("Step 3: Fill ACL table so that all minimas are reached") + + for idx in range(num_fabrics_to_commission): + fabric_number = idx + 1 + # Client is client A for each fabric + client_name = "RD%dA" % fabric_number + client = client_by_name[client_name] + + acl = self.build_acl(fabric_number, client_by_name, num_controllers_per_fabric) + + logging.info(f"Step 3a: Writing ACL entry for fabric {fabric_number}") + await client.WriteAttribute(self.dut_node_id, [(0, Clusters.AccessControl.Attributes.Acl(acl))]) + + logging.info(f"Step 3b: Validating ACL entry for fabric {fabric_number}") + acl_readback = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.AccessControl.Attributes.Acl) + fabric_index = 9999 + for entry in acl_readback: + asserts.assert_equal(entry.fabricIndex, fabric_number, "Fabric Index of response entries must match") + fabric_index = entry.fabricIndex + + for entry in acl: + # Fix-up the original ACL list items (that all had fabricIndex of 0 on write, since ignored) + # so that they match incoming fabric index. 
Allows checking by equality of the structs + entry.fabricIndex = fabric_index + asserts.assert_equal(acl_readback, acl, "ACL must match what was written") + + # Step 4 and 5 (the operations cannot be separated): establish all CASE sessions and subscriptions + + # Subscribe with all clients to NodeLabel attribute and 2 more paths + sub_handlers = [] + resub_catchers = [] + output_queue = queue.Queue() + subscription_contents = [ + (0, Clusters.Basic.Attributes.NodeLabel), # Single attribute + (0, Clusters.OperationalCredentials), # Wildcard all of opcreds attributes on EP0 + Clusters.Descriptor # All descriptors on all endpoints + ] + + logging.info("Step 4 and 5 (first part): Establish subscription with all %d clients" % len(client_list)) + for sub_idx, client in enumerate(client_list): + logging.info("Establishing subscription %d/%d from controller node %s" % (sub_idx + 1, len(client_list), client.name)) + + sub = await client.ReadAttribute(nodeid=self.dut_node_id, attributes=subscription_contents, + reportInterval=(min_report_interval_sec, max_report_interval_sec), keepSubscriptions=False) + self._subscriptions.append(sub) + + attribute_handler = AttributeChangeAccumulator( + name=client.name, expected_attribute=Clusters.Basic.Attributes.NodeLabel, output=output_queue) + sub.SetAttributeUpdateCallback(attribute_handler) + sub_handlers.append(attribute_handler) + + # TODO: Replace resubscription catcher with API to disable re-subscription on failure + resub_catcher = ResubscriptionCatcher(name=client.name) + sub.SetResubscriptionAttemptedCallback(resub_catcher) + resub_catchers.append(resub_catcher) + + asserts.assert_equal(len(self._subscriptions), len(client_list), "Must have the right number of subscriptions") + + # Step 6: Read 9 paths and validate success + logging.info("Step 6: Read 9 paths (first 9 attributes of Basic Information cluster) and validate success") + + large_read_contents = [ + Clusters.Basic.Attributes.DataModelRevision, + Clusters.Basic.Attributes.VendorName, + Clusters.Basic.Attributes.VendorID, + Clusters.Basic.Attributes.ProductName, + Clusters.Basic.Attributes.ProductID, + Clusters.Basic.Attributes.NodeLabel, + Clusters.Basic.Attributes.Location, + Clusters.Basic.Attributes.HardwareVersion, + Clusters.Basic.Attributes.HardwareVersionString, + ] + large_read_paths = [(0, attrib) for attrib in large_read_contents] + basic_info = await dev_ctrl.ReadAttribute(self.dut_node_id, large_read_paths) + + # Make sure everything came back from the read that we expected + asserts.assert_true(0 in basic_info.keys(), "Must have read endpoint 0 data") + asserts.assert_true(Clusters.Basic in basic_info[0].keys(), "Must have read Basic Information cluster data") + for attribute in large_read_contents: + asserts.assert_true(attribute in basic_info[0][Clusters.Basic], + "Must have read back attribute %s" % (attribute.__name__)) + + # Step 7: Trigger a change on NodeLabel + logging.info( + "Step 7: Change attribute with one client, await all attributes changed successfully without loss of subscriptions") + await asyncio.sleep(1) + await client_list[0].WriteAttribute(self.dut_node_id, [(0, Clusters.Basic.Attributes.NodeLabel(value=AFTER_LABEL))]) + + all_changes = {client.name: False for client in client_list} + + # Await a stabilization delay in increments to let the event loops run + start_time = time.time() + elapsed = 0 + time_remaining = timeout_delay_sec + + while time_remaining > 0: + try: + item = output_queue.get(block=True, timeout=time_remaining) + client_name, endpoint, 
attribute, value = item['name'], item['endpoint'], item['attribute'], item['value'] + + # Record arrival of an expected subscription change when seen + if endpoint == 0 and attribute == Clusters.Basic.Attributes.NodeLabel and value == AFTER_LABEL: + if not all_changes[client_name]: + logging.info("Got expected attribute change for client %s" % client_name) + all_changes[client_name] = True + + # We are done waiting when we have accumulated all results + if all(all_changes.values()): + logging.info("All clients have reported, done waiting.") + break + except queue.Empty: + # No error, we update timeouts and keep going + pass + + elapsed = time.time() - start_time + time_remaining = timeout_delay_sec - elapsed + + logging.info("Step 7: Validation of results") + sub_test_failed = False + + for catcher in resub_catchers: + if catcher.caught_resubscription: + logging.error("Client %s saw a resubscription" % catcher.name) + sub_test_failed = True + else: + logging.info("Client %s correctly did not see a resubscription" % catcher.name) + + all_reports_gotten = all(all_changes.values()) + if not all_reports_gotten: + logging.error("Missing reports from the following clients: %s" % + ", ".join([name for name, value in all_changes.items() if value is False])) + sub_test_failed = True + else: + logging.info("Got successful reports from all clients, meaning all concurrent CASE sessions worked") + + # Determine result of Step 7 + if sub_test_failed: + asserts.fail("Failed step 7!") + + # Step 8: Validate sessions have not changed by doing a read on NodeLabel from all clients + logging.info("Step 8: Read back NodeLabel directly from all clients") + for sub_idx, client in enumerate(client_list): + logging.info("Reading NodeLabel (%d/%d) from controller node %s" % (sub_idx + 1, len(client_list), client.name)) + + label_readback = await self.read_single_attribute(client, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.Basic.Attributes.NodeLabel) + asserts.assert_equal(label_readback, AFTER_LABEL) + + # TODO: Compare before/after session IDs. Requires more native changes, and the + # subscription method above is actually good enough, we think.
+ + # Step 9: Fill user label list + if has_user_labels and not skip_user_label_cluster_steps: + await self.fill_user_label_list(dev_ctrl, self.dut_node_id) + else: + logging.info("Step 9: Skipped due to no UserLabel cluster instances") + + def random_string(self, length) -> str: + rnd = self._pseudo_random_generator + return "".join([rnd.choice("abcdef0123456789") for _ in range(length)])[:length] + + async def fill_user_label_list(self, dev_ctrl, target_node_id): + logging.info("Step 9: Fill UserLabel clusters on each endpoint") + user_labels = await dev_ctrl.ReadAttribute(target_node_id, [Clusters.UserLabel]) + + # Build 4 sets of maximized labels + random_label = self.random_string(16) + random_value = self.random_string(16) + labels = [Clusters.UserLabel.Structs.LabelStruct(label=random_label, value=random_value) for _ in range(4)] + + for endpoint_id in user_labels: + clusters = user_labels[endpoint_id] + for cluster in clusters: + if cluster == Clusters.UserLabel: + logging.info("Step 9a: Filling UserLabel cluster on endpoint %d" % endpoint_id) + statuses = await dev_ctrl.WriteAttribute(target_node_id, [(endpoint_id, Clusters.UserLabel.Attributes.LabelList(labels))]) + asserts.assert_equal(statuses[0].Status, StatusEnum.Success, "Label write must succeed") + + logging.info("Step 9b: Validate UserLabel cluster contents after write on endpoint %d" % endpoint_id) + read_back_labels = await self.read_single_attribute(dev_ctrl, node_id=target_node_id, endpoint=endpoint_id, attribute=Clusters.UserLabel.Attributes.LabelList) + print(read_back_labels) + + asserts.assert_equal(read_back_labels, labels, "LabelList attribute must match what was written") + + def build_acl(self, fabric_number, client_by_name, num_controllers_per_fabric): + acl = [] + + # Test says: + # + # . struct + # - Privilege field: Administer (5) + # - AuthMode field: CASE (2) + # - Subjects field: [0xFFFF_FFFD_0001_0001, 0x2000_0000_0000_0001, 0x2000_0000_0000_0002, 0x2000_0000_0000_0003] + # - Targets field: [{Endpoint: 0}, {Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC30}, {Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC31}] + # . struct + # - Privilege field: Manage (4) + # - AuthMode field: CASE (2) + # - Subjects field: [0x1000_0000_0000_0001, 0x1000_0000_0000_0002, 0x1000_0000_0000_0003, 0x1000_0000_0000_0004] + # - Targets field: [{Cluster: 0xFFF1_FC00, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC01, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC02, DeviceType: 0xFFF1_FC22}] + # . 
struct + # - Privilege field: Operate (3) + # - AuthMode field: CASE (2) + # - Subjects field: [0x3000_0000_0000_0001, 0x3000_0000_0000_0002, 0x3000_0000_0000_0003, 0x3000_0000_0000_0004] + # - Targets field: [{Cluster: 0xFFF1_FC40, DeviceType: 0xFFF1_FC20}, {Cluster: 0xFFF1_FC41, DeviceType: 0xFFF1_FC21}, {Cluster: 0xFFF1_FC02, DeviceType: 0xFFF1_FC42}] + + # Administer ACL entry + admin_subjects = [0xFFFF_FFFD_0001_0001, 0x2000_0000_0000_0001, 0x2000_0000_0000_0002, 0x2000_0000_0000_0003] + + admin_targets = [ + Clusters.AccessControl.Structs.Target(endpoint=0), + Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC00, deviceType=0xFFF1_BC30), + Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC01, deviceType=0xFFF1_BC31) + ] + admin_acl_entry = Clusters.AccessControl.Structs.AccessControlEntry(privilege=Clusters.AccessControl.Enums.Privilege.kAdminister, + authMode=Clusters.AccessControl.Enums.AuthMode.kCase, + subjects=admin_subjects, + targets=admin_targets) + acl.append(admin_acl_entry) + + # Manage ACL entry + manage_subjects = [0x1000_0000_0000_0001, 0x1000_0000_0000_0002, 0x1000_0000_0000_0003, 0x1000_0000_0000_0004] + manage_targets = [ + Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC00, deviceType=0xFFF1_BC20), + Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC01, deviceType=0xFFF1_BC21), + Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC02, deviceType=0xFFF1_BC22) + ] + + manage_acl_entry = Clusters.AccessControl.Structs.AccessControlEntry(privilege=Clusters.AccessControl.Enums.Privilege.kManage, + authMode=Clusters.AccessControl.Enums.AuthMode.kCase, + subjects=manage_subjects, + targets=manage_targets) + acl.append(manage_acl_entry) + + # Operate ACL entry + operate_subjects = [0x3000_0000_0000_0001, 0x3000_0000_0000_0002, 0x3000_0000_0000_0003, 0x3000_0000_0000_0004] + operate_targets = [ + Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC40, deviceType=0xFFF1_BC20), + Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC41, deviceType=0xFFF1_BC21), + Clusters.AccessControl.Structs.Target(cluster=0xFFF1_FC42, deviceType=0xFFF1_BC42) + ] + + operate_acl_entry = Clusters.AccessControl.Structs.AccessControlEntry(privilege=Clusters.AccessControl.Enums.Privilege.kOperate, + authMode=Clusters.AccessControl.Enums.AuthMode.kCase, + subjects=operate_subjects, + targets=operate_targets) + acl.append(operate_acl_entry) + + return acl + + +if __name__ == "__main__": + default_matter_test_main(maximize_cert_chains=True, controller_cat_tags=[0x0001_0001]) diff --git a/src/python_testing/matter_testing_support.py b/src/python_testing/matter_testing_support.py index 607f24a057b9c3..7c21f0b6471ce8 100644 --- a/src/python_testing/matter_testing_support.py +++ b/src/python_testing/matter_testing_support.py @@ -124,6 +124,7 @@ class MatterTestConfig: ble_interface_id: int = None admin_vendor_id: int = _DEFAULT_ADMIN_VENDOR_ID + case_admin_subject: int = None global_test_params: dict = field(default_factory=dict) # List of explicit tests to run by name. 
If empty, all tests will run tests: List[str] = field(default_factory=list) @@ -132,6 +133,7 @@ class MatterTestConfig: discriminator: int = None setup_passcode: int = None commissionee_ip_address_just_for_testing: str = None + maximize_cert_chains: bool = False qr_code_content: str = None manual_code: str = None @@ -144,6 +146,9 @@ class MatterTestConfig: dut_node_id: int = _DEFAULT_DUT_NODE_ID # Node ID to use for controller/commissioner controller_node_id: int = _DEFAULT_CONTROLLER_NODE_ID + # CAT Tags for default controller/commissioner + controller_cat_tags: List[int] = None + # Fabric ID which to use fabric_id: int = None # "Alpha" by default @@ -185,11 +190,12 @@ def _init_stack(self, already_initialized: bool, **kwargs): if (len(self._certificate_authority_manager.activeCaList) == 0): self._logger.warn( "Didn't find any CertificateAuthorities in storage -- creating a new CertificateAuthority + FabricAdmin...") - ca = self._certificate_authority_manager.NewCertificateAuthority() - ca.NewFabricAdmin(vendorId=0xFFF1, fabricId=0xFFF1) + ca = self._certificate_authority_manager.NewCertificateAuthority(caIndex=self._config.root_of_trust_index) + ca.maximizeCertChains = self._config.maximize_cert_chains + ca.NewFabricAdmin(vendorId=0xFFF1, fabricId=self._config.fabric_id) elif (len(self._certificate_authority_manager.activeCaList[0].adminList) == 0): self._logger.warn("Didn't find any FabricAdmins in storage -- creating a new one...") - self._certificate_authority_manager.activeCaList[0].NewFabricAdmin(vendorId=0xFFF1, fabricId=0xFFF1) + self._certificate_authority_manager.activeCaList[0].NewFabricAdmin(vendorId=0xFFF1, fabricId=self._config.fabric_id) # TODO: support getting access to chip-tool credentials issuer's data @@ -477,6 +483,13 @@ def populate_commissioning_args(args: argparse.Namespace, config: MatterTestConf return False config.commissionee_ip_address_just_for_testing = args.ip_addr + if args.case_admin_subject is None: + # Use controller node ID as CASE admin subject during commissioning if nothing provided + config.case_admin_subject = config.controller_node_id + else: + # If a CASE admin subject is provided, then use that + config.case_admin_subject = args.case_admin_subject + return True @@ -569,6 +582,8 @@ def parse_matter_test_args(argv: List[str]) -> MatterTestConfig: commission_group.add_argument('--admin-vendor-id', action="store", type=int_decimal_or_hex, default=_DEFAULT_ADMIN_VENDOR_ID, metavar="VENDOR_ID", help="VendorID to use during commissioning (default 0x%04X)" % _DEFAULT_ADMIN_VENDOR_ID) + commission_group.add_argument('--case-admin-subject', action="store", type=int_decimal_or_hex, + metavar="CASE_ADMIN_SUBJECT", help="Set the CASE admin subject to an explicit value (defaults to the commissioner Node ID)") code_group = parser.add_mutually_exclusive_group(required=False) @@ -655,7 +670,7 @@ def _commission_device(self) -> bool: raise ValueError("Invalid commissioning method %s!" % conf.commissioning_method) -def default_matter_test_main(argv=None): +def default_matter_test_main(argv=None, **kwargs): """Execute the test class in a test module. This is the default entry point for running a test script file directly. In this case, only one test class in a test script is allowed.
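The **kwargs added to default_matter_test_main() above exist so a script can force settings that must be in place before the Matter stack initializes. A minimal sketch of a script entry point using them (this mirrors the TC_RR_1_1.py __main__ earlier in this diff; the keyword names come from this diff, everything else is illustrative):

    # Sketch: opt a test script into maximized cert chains and a CAT-tagged
    # controller NOC before the Matter stack is brought up.
    if __name__ == "__main__":
        default_matter_test_main(maximize_cert_chains=True,
                                 controller_cat_tags=[0x0001_0001])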
@@ -670,6 +685,10 @@ def default_matter_test_main(argv=None): """ matter_test_config = parse_matter_test_args(argv) + # Allow override of command line settings from optional keyword arguments + if matter_test_config.controller_cat_tags is None and "controller_cat_tags" in kwargs: + matter_test_config.controller_cat_tags = kwargs["controller_cat_tags"] + # Find the test class in the test script. test_class = _find_test_class() @@ -681,12 +700,21 @@ def default_matter_test_main(argv=None): if len(matter_test_config.tests) > 0: tests = matter_test_config.tests + # This is required in case we need any testing with maximized certificate chains. + # We need *all* issuers from the start, even for the default controller, to use + # maximized chains before MatterStackState init; otherwise some stale certs + # may not chain properly. + if "maximize_cert_chains" in kwargs: + matter_test_config.maximize_cert_chains = kwargs["maximize_cert_chains"] + stack = MatterStackState(matter_test_config) test_config.user_params["matter_stack"] = stash_globally(stack) # TODO: Steer to right FabricAdmin! + # TODO: If CASE Admin Subject is a CAT tag range, then make sure to issue NOC with that CAT tag + default_controller = stack.certificate_authorities[0].adminList[0].NewController(nodeId=matter_test_config.controller_node_id, - paaTrustStorePath=str(matter_test_config.paa_trust_store_path)) + paaTrustStorePath=str(matter_test_config.paa_trust_store_path), catTags=matter_test_config.controller_cat_tags) test_config.user_params["default_controller"] = stash_globally(default_controller) test_config.user_params["matter_test_config"] = stash_globally(matter_test_config)
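Taken together, the Python pieces in this diff compose as follows. A hedged end-to-end sketch (assumes an initialized test stack; the helper names come from CommissioningBuildingBlocks and base.py above, while adminCtrl, fabricAdmin, and dut_node_id are placeholders):

    # Sketch: mint a controller whose NOC carries a CAT, then grant Administer
    # to that CAT rather than to the controller's node ID.
    controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric(
        fabricAdmin=fabricAdmin, adminDevCtrl=adminCtrl, controllerNodeIds=[300],
        privilege=None, targetNodeId=dut_node_id, catTags=[0x0001_0001])
    # GrantPrivilege encodes each CAT as (0xFFFF_FFFD_0000_0000 | tag) in the
    # ACL subject list, per the CASE Authenticated Tag subject format.
    await CommissioningBuildingBlocks.GrantPrivilege(
        adminCtrl=adminCtrl, grantedCtrl=controllers[0],
        privilege=Clusters.AccessControl.Enums.Privilege.kAdminister,
        targetNodeId=dut_node_id, targetCatTags=[0x0001_0001])

Because the grant targets the CAT subject rather than a node ID, any controller carrying that tag in its NOC (such as all of TC_RR_1_1's per-fabric clients) receives the privilege through a single ACL entry.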