Compare commits

...

1 Commit

Author  SHA1        Message                                                 Date
        9d06f983af  Imported more library files (Not compiling currently)   2025-04-12 23:37:19 +01:00
2518 changed files with 1021900 additions and 52 deletions

View File

@@ -0,0 +1,181 @@
/*
*
* Copyright (c) 2020 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* This file defines constants and macros for introspecting and
* manipulating CHIP versions.
*
* !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!!
*
* DO NOT EDIT THIS FILE! This file is automatically-generated by
* the '${program}' script.
*
* The constants and macros defined here may be used to, for
* example, conditionally-compile older, newer, or changed CHIP
* APIs based on the CHIP version. For example:
*
* @code
* #if CHIP_VERSION_CODE >= CHIP_VERSION_CODE_ENCODE(1, 5, 0)
* ...
* #else
* ...
* #endif
* @endcode
*
*/
#ifndef CHIP_VERSION_H_
#define CHIP_VERSION_H_
#define _CHIP_VERSION_CODE_MAJOR_WIDTH 8
#define _CHIP_VERSION_CODE_MINOR_WIDTH 8
#define _CHIP_VERSION_CODE_PATCH_WIDTH 8
#define _CHIP_VERSION_CODE_MAJOR_MASK ((1 << _CHIP_VERSION_CODE_MAJOR_WIDTH) - 1)
#define _CHIP_VERSION_CODE_MINOR_MASK ((1 << _CHIP_VERSION_CODE_MINOR_WIDTH) - 1)
#define _CHIP_VERSION_CODE_PATCH_MASK ((1 << _CHIP_VERSION_CODE_PATCH_WIDTH) - 1)
#define _CHIP_VERSION_CODE_MAJOR_SHIFT 24
#define _CHIP_VERSION_CODE_MINOR_SHIFT 16
#define _CHIP_VERSION_CODE_PATCH_SHIFT 8
/**
* @def CHIP_VERSION_CODE_ENCODE(major, minor, patch)
*
* @brief
* Encode a CHIP version code from its constituent @a major, @a minor, and @a patch
* components.
*
* This macro may be used in conjunction with CHIP_VERSION_CODE to, for
* example, conditionally-compile older, newer, or changed CHIP APIs based
* on the CHIP version. For example:
*
* @code
* #if CHIP_VERSION_CODE >= CHIP_VERSION_CODE_ENCODE(1, 5, 0)
* ...
* #else
* ...
* #endif
* @endcode
*
*/
#define CHIP_VERSION_CODE_ENCODE(major, minor, patch) \
((((major) & _CHIP_VERSION_CODE_MAJOR_MASK) << _CHIP_VERSION_CODE_MAJOR_SHIFT) | \
(((minor) & _CHIP_VERSION_CODE_MINOR_MASK) << _CHIP_VERSION_CODE_MINOR_SHIFT) | \
(((patch) & _CHIP_VERSION_CODE_PATCH_MASK) << _CHIP_VERSION_CODE_PATCH_SHIFT))
/**
* @def CHIP_VERSION_CODE_DECODE_MAJOR(code)
*
* @brief
* Decode a CHIP major version component from a CHIP version @a code.
*
*/
#define CHIP_VERSION_CODE_DECODE_MAJOR(code) (((code) >> _CHIP_VERSION_CODE_MAJOR_SHIFT) & _CHIP_VERSION_CODE_MAJOR_MASK)
/**
* @def CHIP_VERSION_CODE_DECODE_MINOR(code)
*
* @brief
* Decode a CHIP minor version component from a CHIP version @a code.
*
*/
#define CHIP_VERSION_CODE_DECODE_MINOR(code) (((code) >> _CHIP_VERSION_CODE_MINOR_SHIFT) & _CHIP_VERSION_CODE_MINOR_MASK)
/**
* @def CHIP_VERSION_CODE_DECODE_PATCH(code)
*
* @brief
* Decode a CHIP patch version component from a CHIP version @a code.
*
*/
#define CHIP_VERSION_CODE_DECODE_PATCH(code) (((code) >> _CHIP_VERSION_CODE_PATCH_SHIFT) & _CHIP_VERSION_CODE_PATCH_MASK)
/**
* @def CHIP_VERSION_MAJOR
*
* @brief
* The CHIP version major component, as an unsigned integer.
*
*/
#define CHIP_VERSION_MAJOR 0
/**
* @def CHIP_VERSION_MINOR
*
* @brief
* The CHIP version minor component, as an unsigned integer.
*
*/
#define CHIP_VERSION_MINOR 0
/**
* @def CHIP_VERSION_PATCH
*
* @brief
* The CHIP version patch component, as an unsigned integer.
*
*/
#define CHIP_VERSION_PATCH 0
/**
* @def CHIP_VERSION_EXTRA
*
* @brief
* The CHIP version extra component, as a quoted C string.
*
*/
#define CHIP_VERSION_EXTRA ""
/**
* @def CHIP_VERSION_STRING
*
* @brief
* The CHIP version, as a quoted C string.
*
*/
#define CHIP_VERSION_STRING "0.0.0"
/**
* @def CHIP_VERSION_CODE
*
* @brief
* The CHIP version, including the major, minor, and patch components,
* encoded as an unsigned integer.
*
* This macro may be used in conjunction with CHIP_VERSION_CODE_ENCODE
* to, for example, conditionally-compile older, newer, or changed CHIP
* APIs based on the CHIP version. For example:
*
* @code
* #if CHIP_VERSION_CODE >= CHIP_VERSION_CODE_ENCODE(1, 5, 0)
* ...
* #else
* ...
* #endif
* @endcode
*
*/
#define CHIP_VERSION_CODE CHIP_VERSION_CODE_ENCODE( \
CHIP_VERSION_MAJOR, \
CHIP_VERSION_MINOR, \
CHIP_VERSION_PATCH \
)
#endif /* CHIP_VERSION_H_ */
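
For reference, here is a minimal standalone sketch (not part of the commit; it assumes this header is reachable as <CHIPVersion.h>) showing how the encode/decode macros round-trip a version triple and gate code at compile time:

#include <CHIPVersion.h>
#include <stdio.h>

int main(void)
{
    unsigned code = CHIP_VERSION_CODE_ENCODE(1, 5, 0);

    // Each component is masked to 8 bits and shifted into its own byte.
    printf("major=%u minor=%u patch=%u\n",
           (unsigned) CHIP_VERSION_CODE_DECODE_MAJOR(code),
           (unsigned) CHIP_VERSION_CODE_DECODE_MINOR(code),
           (unsigned) CHIP_VERSION_CODE_DECODE_PATCH(code));

    // Compile-time gating, as described in the header comment above.
#if CHIP_VERSION_CODE >= CHIP_VERSION_CODE_ENCODE(1, 5, 0)
    printf("built against CHIP >= 1.5.0\n");
#else
    printf("built against CHIP < 1.5.0\n");
#endif
    return 0;
}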

View File

@@ -0,0 +1,4 @@
// Generated by write_build_time_header.py
#pragma once
#define CHIP_DEVICE_CONFIG_FIRMWARE_BUILD_TIME_MATTER_EPOCH_S 750270532

View File

@@ -0,0 +1,9 @@
// Generated by write_buildconfig_header.py
// From "//third_party/connectedhomeip/src/access:gen_access_buildconfig"
#ifndef ACCESS_ACCESSBUILDCONFIG_H_
#define ACCESS_ACCESSBUILDCONFIG_H_
#define CHIP_CONFIG_USE_ACCESS_RESTRICTIONS 0
#endif // ACCESS_ACCESSBUILDCONFIG_H_

View File

@@ -0,0 +1,260 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
* Copyright (c) 2019 Google LLC.
* Copyright (c) 2013-2017 Nest Labs, Inc.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
/**
* @file
* ASN.1 Object ID Definitions
*
* !!! WARNING !!! WARNING !!! WARNING !!!
*
* DO NOT EDIT THIS FILE! This file is generated by the
* gen-oid-table.py script.
*
* To make changes, edit the script and re-run it to generate
* this file.
*
*/
#pragma once
#include <cstdint>
namespace chip {
namespace ASN1 {
enum OIDCategory
{
kOIDCategory_PubKeyAlgo = 0x0100,
kOIDCategory_SigAlgo = 0x0200,
kOIDCategory_AttributeType = 0x0300,
kOIDCategory_EllipticCurve = 0x0400,
kOIDCategory_Extension = 0x0500,
kOIDCategory_KeyPurpose = 0x0600,
kOIDCategory_NotSpecified = 0,
kOIDCategory_Unknown = 0x0F00,
kOIDCategory_Mask = 0x0F00
};
typedef uint16_t OID;
enum
{
kOID_PubKeyAlgo_ECPublicKey = 0x0101,
kOID_SigAlgo_ECDSAWithSHA256 = 0x0201,
kOID_AttributeType_CommonName = 0x0301,
kOID_AttributeType_Surname = 0x0302,
kOID_AttributeType_SerialNumber = 0x0303,
kOID_AttributeType_CountryName = 0x0304,
kOID_AttributeType_LocalityName = 0x0305,
kOID_AttributeType_StateOrProvinceName = 0x0306,
kOID_AttributeType_OrganizationName = 0x0307,
kOID_AttributeType_OrganizationalUnitName = 0x0308,
kOID_AttributeType_Title = 0x0309,
kOID_AttributeType_Name = 0x030A,
kOID_AttributeType_GivenName = 0x030B,
kOID_AttributeType_Initials = 0x030C,
kOID_AttributeType_GenerationQualifier = 0x030D,
kOID_AttributeType_DNQualifier = 0x030E,
kOID_AttributeType_Pseudonym = 0x030F,
kOID_AttributeType_DomainComponent = 0x0310,
kOID_AttributeType_MatterNodeId = 0x0311,
kOID_AttributeType_MatterFirmwareSigningId = 0x0312,
kOID_AttributeType_MatterICACId = 0x0313,
kOID_AttributeType_MatterRCACId = 0x0314,
kOID_AttributeType_MatterFabricId = 0x0315,
kOID_AttributeType_MatterCASEAuthTag = 0x0316,
kOID_EllipticCurve_prime256v1 = 0x0401,
kOID_Extension_BasicConstraints = 0x0501,
kOID_Extension_KeyUsage = 0x0502,
kOID_Extension_ExtendedKeyUsage = 0x0503,
kOID_Extension_SubjectKeyIdentifier = 0x0504,
kOID_Extension_AuthorityKeyIdentifier = 0x0505,
kOID_Extension_CSRRequest = 0x0506,
kOID_KeyPurpose_ServerAuth = 0x0601,
kOID_KeyPurpose_ClientAuth = 0x0602,
kOID_KeyPurpose_CodeSigning = 0x0603,
kOID_KeyPurpose_EmailProtection = 0x0604,
kOID_KeyPurpose_TimeStamping = 0x0605,
kOID_KeyPurpose_OCSPSigning = 0x0606,
kOID_NotSpecified = 0,
kOID_Unknown = 0xFFFF,
kOID_EnumMask = 0x00FF
};
struct OIDTableEntry
{
OID EnumVal;
const uint8_t *EncodedOID;
uint16_t EncodedOIDLen;
};
struct OIDNameTableEntry
{
OID EnumVal;
const char *Name;
};
extern const OIDTableEntry sOIDTable[];
extern const OIDNameTableEntry sOIDNameTable[];
extern const size_t sOIDTableSize;
#ifdef ASN1_DEFINE_OID_TABLE
static const uint8_t sOID_PubKeyAlgo_ECPublicKey[] = { 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x02, 0x01 };
static const uint8_t sOID_SigAlgo_ECDSAWithSHA256[] = { 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x04, 0x03, 0x02 };
static const uint8_t sOID_AttributeType_CommonName[] = { 0x55, 0x04, 0x03 };
static const uint8_t sOID_AttributeType_Surname[] = { 0x55, 0x04, 0x04 };
static const uint8_t sOID_AttributeType_SerialNumber[] = { 0x55, 0x04, 0x05 };
static const uint8_t sOID_AttributeType_CountryName[] = { 0x55, 0x04, 0x06 };
static const uint8_t sOID_AttributeType_LocalityName[] = { 0x55, 0x04, 0x07 };
static const uint8_t sOID_AttributeType_StateOrProvinceName[] = { 0x55, 0x04, 0x08 };
static const uint8_t sOID_AttributeType_OrganizationName[] = { 0x55, 0x04, 0x0A };
static const uint8_t sOID_AttributeType_OrganizationalUnitName[] = { 0x55, 0x04, 0x0B };
static const uint8_t sOID_AttributeType_Title[] = { 0x55, 0x04, 0x0C };
static const uint8_t sOID_AttributeType_Name[] = { 0x55, 0x04, 0x29 };
static const uint8_t sOID_AttributeType_GivenName[] = { 0x55, 0x04, 0x2A };
static const uint8_t sOID_AttributeType_Initials[] = { 0x55, 0x04, 0x2B };
static const uint8_t sOID_AttributeType_GenerationQualifier[] = { 0x55, 0x04, 0x2C };
static const uint8_t sOID_AttributeType_DNQualifier[] = { 0x55, 0x04, 0x2E };
static const uint8_t sOID_AttributeType_Pseudonym[] = { 0x55, 0x04, 0x41 };
static const uint8_t sOID_AttributeType_DomainComponent[] = { 0x09, 0x92, 0x26, 0x89, 0x93, 0xF2, 0x2C, 0x64, 0x01, 0x19 };
static const uint8_t sOID_AttributeType_MatterNodeId[] = { 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0xA2, 0x7C, 0x01, 0x01 };
static const uint8_t sOID_AttributeType_MatterFirmwareSigningId[] = { 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0xA2, 0x7C, 0x01, 0x02 };
static const uint8_t sOID_AttributeType_MatterICACId[] = { 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0xA2, 0x7C, 0x01, 0x03 };
static const uint8_t sOID_AttributeType_MatterRCACId[] = { 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0xA2, 0x7C, 0x01, 0x04 };
static const uint8_t sOID_AttributeType_MatterFabricId[] = { 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0xA2, 0x7C, 0x01, 0x05 };
static const uint8_t sOID_AttributeType_MatterCASEAuthTag[] = { 0x2B, 0x06, 0x01, 0x04, 0x01, 0x82, 0xA2, 0x7C, 0x01, 0x06 };
static const uint8_t sOID_EllipticCurve_prime256v1[] = { 0x2A, 0x86, 0x48, 0xCE, 0x3D, 0x03, 0x01, 0x07 };
static const uint8_t sOID_Extension_BasicConstraints[] = { 0x55, 0x1D, 0x13 };
static const uint8_t sOID_Extension_KeyUsage[] = { 0x55, 0x1D, 0x0F };
static const uint8_t sOID_Extension_ExtendedKeyUsage[] = { 0x55, 0x1D, 0x25 };
static const uint8_t sOID_Extension_SubjectKeyIdentifier[] = { 0x55, 0x1D, 0x0E };
static const uint8_t sOID_Extension_AuthorityKeyIdentifier[] = { 0x55, 0x1D, 0x23 };
static const uint8_t sOID_Extension_CSRRequest[] = { 0x2A, 0x86, 0x48, 0x86, 0xF7, 0x0D, 0x01, 0x09, 0x0E };
static const uint8_t sOID_KeyPurpose_ServerAuth[] = { 0x2B, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x01 };
static const uint8_t sOID_KeyPurpose_ClientAuth[] = { 0x2B, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x02 };
static const uint8_t sOID_KeyPurpose_CodeSigning[] = { 0x2B, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x03 };
static const uint8_t sOID_KeyPurpose_EmailProtection[] = { 0x2B, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x04 };
static const uint8_t sOID_KeyPurpose_TimeStamping[] = { 0x2B, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x08 };
static const uint8_t sOID_KeyPurpose_OCSPSigning[] = { 0x2B, 0x06, 0x01, 0x05, 0x05, 0x07, 0x03, 0x09 };
const OIDTableEntry sOIDTable[] =
{
{ kOID_PubKeyAlgo_ECPublicKey, sOID_PubKeyAlgo_ECPublicKey, sizeof(sOID_PubKeyAlgo_ECPublicKey) },
{ kOID_SigAlgo_ECDSAWithSHA256, sOID_SigAlgo_ECDSAWithSHA256, sizeof(sOID_SigAlgo_ECDSAWithSHA256) },
{ kOID_AttributeType_CommonName, sOID_AttributeType_CommonName, sizeof(sOID_AttributeType_CommonName) },
{ kOID_AttributeType_Surname, sOID_AttributeType_Surname, sizeof(sOID_AttributeType_Surname) },
{ kOID_AttributeType_SerialNumber, sOID_AttributeType_SerialNumber, sizeof(sOID_AttributeType_SerialNumber) },
{ kOID_AttributeType_CountryName, sOID_AttributeType_CountryName, sizeof(sOID_AttributeType_CountryName) },
{ kOID_AttributeType_LocalityName, sOID_AttributeType_LocalityName, sizeof(sOID_AttributeType_LocalityName) },
{ kOID_AttributeType_StateOrProvinceName, sOID_AttributeType_StateOrProvinceName, sizeof(sOID_AttributeType_StateOrProvinceName) },
{ kOID_AttributeType_OrganizationName, sOID_AttributeType_OrganizationName, sizeof(sOID_AttributeType_OrganizationName) },
{ kOID_AttributeType_OrganizationalUnitName, sOID_AttributeType_OrganizationalUnitName, sizeof(sOID_AttributeType_OrganizationalUnitName) },
{ kOID_AttributeType_Title, sOID_AttributeType_Title, sizeof(sOID_AttributeType_Title) },
{ kOID_AttributeType_Name, sOID_AttributeType_Name, sizeof(sOID_AttributeType_Name) },
{ kOID_AttributeType_GivenName, sOID_AttributeType_GivenName, sizeof(sOID_AttributeType_GivenName) },
{ kOID_AttributeType_Initials, sOID_AttributeType_Initials, sizeof(sOID_AttributeType_Initials) },
{ kOID_AttributeType_GenerationQualifier, sOID_AttributeType_GenerationQualifier, sizeof(sOID_AttributeType_GenerationQualifier) },
{ kOID_AttributeType_DNQualifier, sOID_AttributeType_DNQualifier, sizeof(sOID_AttributeType_DNQualifier) },
{ kOID_AttributeType_Pseudonym, sOID_AttributeType_Pseudonym, sizeof(sOID_AttributeType_Pseudonym) },
{ kOID_AttributeType_DomainComponent, sOID_AttributeType_DomainComponent, sizeof(sOID_AttributeType_DomainComponent) },
{ kOID_AttributeType_MatterNodeId, sOID_AttributeType_MatterNodeId, sizeof(sOID_AttributeType_MatterNodeId) },
{ kOID_AttributeType_MatterFirmwareSigningId, sOID_AttributeType_MatterFirmwareSigningId, sizeof(sOID_AttributeType_MatterFirmwareSigningId) },
{ kOID_AttributeType_MatterICACId, sOID_AttributeType_MatterICACId, sizeof(sOID_AttributeType_MatterICACId) },
{ kOID_AttributeType_MatterRCACId, sOID_AttributeType_MatterRCACId, sizeof(sOID_AttributeType_MatterRCACId) },
{ kOID_AttributeType_MatterFabricId, sOID_AttributeType_MatterFabricId, sizeof(sOID_AttributeType_MatterFabricId) },
{ kOID_AttributeType_MatterCASEAuthTag, sOID_AttributeType_MatterCASEAuthTag, sizeof(sOID_AttributeType_MatterCASEAuthTag) },
{ kOID_EllipticCurve_prime256v1, sOID_EllipticCurve_prime256v1, sizeof(sOID_EllipticCurve_prime256v1) },
{ kOID_Extension_BasicConstraints, sOID_Extension_BasicConstraints, sizeof(sOID_Extension_BasicConstraints) },
{ kOID_Extension_KeyUsage, sOID_Extension_KeyUsage, sizeof(sOID_Extension_KeyUsage) },
{ kOID_Extension_ExtendedKeyUsage, sOID_Extension_ExtendedKeyUsage, sizeof(sOID_Extension_ExtendedKeyUsage) },
{ kOID_Extension_SubjectKeyIdentifier, sOID_Extension_SubjectKeyIdentifier, sizeof(sOID_Extension_SubjectKeyIdentifier) },
{ kOID_Extension_AuthorityKeyIdentifier, sOID_Extension_AuthorityKeyIdentifier, sizeof(sOID_Extension_AuthorityKeyIdentifier) },
{ kOID_Extension_CSRRequest, sOID_Extension_CSRRequest, sizeof(sOID_Extension_CSRRequest) },
{ kOID_KeyPurpose_ServerAuth, sOID_KeyPurpose_ServerAuth, sizeof(sOID_KeyPurpose_ServerAuth) },
{ kOID_KeyPurpose_ClientAuth, sOID_KeyPurpose_ClientAuth, sizeof(sOID_KeyPurpose_ClientAuth) },
{ kOID_KeyPurpose_CodeSigning, sOID_KeyPurpose_CodeSigning, sizeof(sOID_KeyPurpose_CodeSigning) },
{ kOID_KeyPurpose_EmailProtection, sOID_KeyPurpose_EmailProtection, sizeof(sOID_KeyPurpose_EmailProtection) },
{ kOID_KeyPurpose_TimeStamping, sOID_KeyPurpose_TimeStamping, sizeof(sOID_KeyPurpose_TimeStamping) },
{ kOID_KeyPurpose_OCSPSigning, sOID_KeyPurpose_OCSPSigning, sizeof(sOID_KeyPurpose_OCSPSigning) },
{ kOID_NotSpecified, NULL, 0 }
};
const size_t sOIDTableSize = sizeof(sOIDTable) / sizeof(OIDTableEntry);
#endif // ASN1_DEFINE_OID_TABLE
#ifdef ASN1_DEFINE_OID_NAME_TABLE
const OIDNameTableEntry sOIDNameTable[] =
{
{ kOID_PubKeyAlgo_ECPublicKey, "ECPublicKey" },
{ kOID_SigAlgo_ECDSAWithSHA256, "ECDSAWithSHA256" },
{ kOID_AttributeType_CommonName, "CommonName" },
{ kOID_AttributeType_Surname, "Surname" },
{ kOID_AttributeType_SerialNumber, "SerialNumber" },
{ kOID_AttributeType_CountryName, "CountryName" },
{ kOID_AttributeType_LocalityName, "LocalityName" },
{ kOID_AttributeType_StateOrProvinceName, "StateOrProvinceName" },
{ kOID_AttributeType_OrganizationName, "OrganizationName" },
{ kOID_AttributeType_OrganizationalUnitName, "OrganizationalUnitName" },
{ kOID_AttributeType_Title, "Title" },
{ kOID_AttributeType_Name, "Name" },
{ kOID_AttributeType_GivenName, "GivenName" },
{ kOID_AttributeType_Initials, "Initials" },
{ kOID_AttributeType_GenerationQualifier, "GenerationQualifier" },
{ kOID_AttributeType_DNQualifier, "DNQualifier" },
{ kOID_AttributeType_Pseudonym, "Pseudonym" },
{ kOID_AttributeType_DomainComponent, "DomainComponent" },
{ kOID_AttributeType_MatterNodeId, "MatterNodeId" },
{ kOID_AttributeType_MatterFirmwareSigningId, "MatterFirmwareSigningId" },
{ kOID_AttributeType_MatterICACId, "MatterICACId" },
{ kOID_AttributeType_MatterRCACId, "MatterRCACId" },
{ kOID_AttributeType_MatterFabricId, "MatterFabricId" },
{ kOID_AttributeType_MatterCASEAuthTag, "MatterCASEAuthTag" },
{ kOID_EllipticCurve_prime256v1, "prime256v1" },
{ kOID_Extension_BasicConstraints, "BasicConstraints" },
{ kOID_Extension_KeyUsage, "KeyUsage" },
{ kOID_Extension_ExtendedKeyUsage, "ExtendedKeyUsage" },
{ kOID_Extension_SubjectKeyIdentifier, "SubjectKeyIdentifier" },
{ kOID_Extension_AuthorityKeyIdentifier, "AuthorityKeyIdentifier" },
{ kOID_Extension_CSRRequest, "CSRRequest" },
{ kOID_KeyPurpose_ServerAuth, "ServerAuth" },
{ kOID_KeyPurpose_ClientAuth, "ClientAuth" },
{ kOID_KeyPurpose_CodeSigning, "CodeSigning" },
{ kOID_KeyPurpose_EmailProtection, "EmailProtection" },
{ kOID_KeyPurpose_TimeStamping, "TimeStamping" },
{ kOID_KeyPurpose_OCSPSigning, "OCSPSigning" },
{ kOID_NotSpecified, NULL }
};
#endif // ASN1_DEFINE_OID_NAME_TABLE
} // namespace ASN1
} // namespace chip
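
To make the table mechanism above concrete, here is a small consumer sketch (an assumption about intended usage, not part of the imported file, and the include path is hypothetical): exactly one translation unit defines ASN1_DEFINE_OID_TABLE before including the header so that sOIDTable and sOIDTableSize are emitted there, and lookups walk the table.

#define ASN1_DEFINE_OID_TABLE
#include "ASN1OID.h" // hypothetical include name for the header above

#include <cstddef>

namespace {

// Return the DER-encoded bytes for an OID enum value, or nullptr if unknown.
const uint8_t * FindEncodedOID(chip::ASN1::OID oid, uint16_t & outLen)
{
    using namespace chip::ASN1;
    for (size_t i = 0; i < sOIDTableSize; ++i)
    {
        if (sOIDTable[i].EnumVal == oid)
        {
            outLen = sOIDTable[i].EncodedOIDLen;
            return sOIDTable[i].EncodedOID;
        }
    }
    outLen = 0;
    return nullptr;
}

// The category of an OID is carried in its upper byte.
chip::ASN1::OIDCategory CategoryOf(chip::ASN1::OID oid)
{
    return static_cast<chip::ASN1::OIDCategory>(oid & chip::ASN1::kOIDCategory_Mask);
}

} // namespace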

View File

@@ -0,0 +1,15 @@
#ifndef BLE_BLEBUILDCONFIG_H_
#define BLE_BLEBUILDCONFIG_H_
#include "sl_component_catalog.h"
#if defined(SL_CATALOG_MATTER_BLE_PRESENT)
#define CONFIG_NETWORK_LAYER_BLE 1
#else
#define CONFIG_NETWORK_LAYER_BLE 0
#endif
#define CHIP_ENABLE_CHIPOBLE_TEST 0
#define BLE_PROJECT_CONFIG_INCLUDE <CHIPProjectConfig.h>
#define BLE_PLATFORM_CONFIG_INCLUDE <platform/silabs/BlePlatformConfig.h>
#endif // BLE_BLEBUILDCONFIG_H_

View File

@@ -0,0 +1,41 @@
#ifndef CORE_CHIPBUILDCONFIG_H_
#define CORE_CHIPBUILDCONFIG_H_
#define SL_MATTER_LOG_NONE 0
#define SL_MATTER_LOG_ERROR 1
#define SL_MATTER_LOG_PROGRESS 3
#define SL_MATTER_LOG_DETAIL 4
#define SL_MATTER_LOG_AUTOMATION 5
#include "sl_matter_config.h"
#define CHIP_FUZZING_ENABLED 0
#define CHIP_CONFIG_TEST 0
#define CHIP_ERROR_LOGGING (SL_MATTER_LOG_LEVEL >= SL_MATTER_LOG_ERROR)
#define CHIP_PROGRESS_LOGGING (SL_MATTER_LOG_LEVEL >= SL_MATTER_LOG_PROGRESS)
#define CHIP_DETAIL_LOGGING (SL_MATTER_LOG_LEVEL >= SL_MATTER_LOG_DETAIL)
#define CHIP_AUTOMATION_LOGGING (SL_MATTER_LOG_LEVEL >= SL_MATTER_LOG_AUTOMATION)
#define CHIP_CONFIG_LOG_MESSAGE_MAX_SIZE 256
#define CHIP_PW_TOKENIZER_LOGGING 0
#define CHIP_USE_PW_LOGGING 0
#define CHIP_CONFIG_SHORT_ERROR_STR 1 // always 1 on embedded
#define CHIP_CONFIG_ENABLE_ARG_PARSER SL_MATTER_CLI_ARG_PARSER
#define CHIP_TARGET_STYLE_UNIX 0
#define CHIP_TARGET_STYLE_EMBEDDED 1
#define CHIP_CONFIG_MEMORY_MGMT_MALLOC 0
#define HAVE_MALLOC 0
#define HAVE_FREE 0
#define HAVE_NEW 0
#define CHIP_CONFIG_MEMORY_MGMT_PLATFORM 1 // always use "platform" allocator
#define CHIP_CONFIG_MEMORY_DEBUG_CHECKS 0
#define CHIP_CONFIG_MEMORY_DEBUG_DMALLOC 0
#define CHIP_CONFIG_PROVIDE_OBSOLESCENT_INTERFACES 0
#define CHIP_CONFIG_TRANSPORT_TRACE_ENABLED 0
#define CHIP_CONFIG_TRANSPORT_PW_TRACE_ENABLED 0
#define CHIP_CONFIG_MINMDNS_DYNAMIC_OPERATIONAL_RESPONDER_LIST 0
#define CHIP_CONFIG_MINMDNS_MAX_PARALLEL_RESOLVES 2
#endif // CORE_CHIPBUILDCONFIG_H_
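
For orientation, the logging switches above are all derived from a single SL_MATTER_LOG_LEVEL value supplied by sl_matter_config.h (which is not shown in this commit); for example, with the level set to "progress":

// Hypothetical setting in sl_matter_config.h:
//   #define SL_MATTER_LOG_LEVEL SL_MATTER_LOG_PROGRESS   // == 3
//
// The switches above then evaluate to:
//   CHIP_ERROR_LOGGING      -> (3 >= 1) -> 1
//   CHIP_PROGRESS_LOGGING   -> (3 >= 3) -> 1
//   CHIP_DETAIL_LOGGING     -> (3 >= 4) -> 0
//   CHIP_AUTOMATION_LOGGING -> (3 >= 5) -> 0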

View File

@@ -0,0 +1,10 @@
#ifndef CRYPTO_CRYPTOBUILDCONFIG_H_
#define CRYPTO_CRYPTOBUILDCONFIG_H_
#define CHIP_CRYPTO_MBEDTLS 0
#define CHIP_CRYPTO_OPENSSL 0
#define CHIP_WITH_OPENSSL 0
#define CHIP_CRYPTO_HSM 0
#define CHIP_CRYPTO_HSM_NXP 0
#endif // CRYPTO_CRYPTOBUILDCONFIG_H_

View File

@@ -0,0 +1,19 @@
#ifndef INET_INETBUILDCONFIG_H_
#define INET_INETBUILDCONFIG_H_
// TODO: enable IPv4 and TCP if using wifi
#define INET_CONFIG_TEST 0
#define INET_CONFIG_ENABLE_UDP_ENDPOINT 1
#define HAVE_LWIP_RAW_BIND_NETIF 1
#define INET_PROJECT_CONFIG_INCLUDE <CHIPProjectConfig.h>
#define INET_PLATFORM_CONFIG_INCLUDE <platform/silabs/InetPlatformConfig.h>
#define INET_CONFIG_ENABLE_IPV4 0
#define INET_CONFIG_ENABLE_TCP_ENDPOINT 0
#ifndef SL_WIFI
#define INET_TCP_END_POINT_IMPL_CONFIG_FILE <inet/TCPEndPointImplOpenThread.h>
#define INET_UDP_END_POINT_IMPL_CONFIG_FILE <inet/UDPEndPointImplOpenThread.h>
#else
#define INET_TCP_END_POINT_IMPL_CONFIG_FILE <inet/TCPEndPointImplLwIP.h>
#define INET_UDP_END_POINT_IMPL_CONFIG_FILE <inet/UDPEndPointImplLwIP.h>
#endif // SL_WIFI
#endif // INET_INETBUILDCONFIG_H_

View File

@@ -0,0 +1,10 @@
// Generated by write_buildconfig_header.py
// From "//third_party/connectedhomeip/src/lwip:gen_lwip_buildconfig"
#ifndef LWIP_LWIP_BUILDCONFIG_H_
#define LWIP_LWIP_BUILDCONFIG_H_
#define HAVE_LWIP_UDP_BIND_NETIF 1
#define LWIP_DEBUG 1
#endif // LWIP_LWIP_BUILDCONFIG_H_

View File

@@ -0,0 +1,13 @@
#ifndef MATTER_TRACING_BUILD_CONFIG_H_
#define MATTER_TRACING_BUILD_CONFIG_H_
// <<< Use Configuration Wizard in Context Menu >>>
// <q MATTER_TRACING_ENABLED> Matter Report on Entering Active
// <i> Default: 0
#define MATTER_TRACING_ENABLED 0
// <<< end of configuration section >>>
#endif // MATTER_TRACING_BUILD_CONFIG_H_

View File

@@ -0,0 +1,100 @@
/*
* Copyright (c) 2016, The OpenThread Authors.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the copyright holder nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file
* @brief
* This file defines the platform-specific functions needed by OpenThread's example applications.
*/
#ifndef OPENTHREAD_SYSTEM_H_
#define OPENTHREAD_SYSTEM_H_
#include <openthread/instance.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* This function performs all platform-specific initialization of OpenThread's drivers.
*
* @note This function is not called by the OpenThread library. Instead, the system/RTOS should call this function
* when initialization of OpenThread's drivers is most appropriate.
*
* @param[in] argc Number of arguments in @p argv.
* @param[in] argv Argument vector.
*
*/
void otSysInit(int argc, char *argv[]);
/**
* This function performs all platform-specific deinitialization for OpenThread's drivers.
*
* @note This function is not called by the OpenThread library. Instead, the system/RTOS should call this function
* when deinitialization of OpenThread's drivers is most appropriate.
*
*/
void otSysDeinit(void);
/**
* This function returns true if a pseudo-reset was requested.
*
* In such a case, the main loop should shut down and re-initialize the OpenThread instance.
*
* @note This function is not called by the OpenThread library. Instead, the system/RTOS should call this function
* in the main loop to determine when to shut down and re-initialize the OpenThread instance.
*
*/
bool otSysPseudoResetWasRequested(void);
/**
* This function performs all platform-specific processing for OpenThread's example applications.
*
* @note This function is not called by the OpenThread library. Instead, the system/RTOS should call this function
* in the main loop when processing OpenThread's drivers is most appropriate.
*
* @param[in] aInstance The OpenThread instance structure.
*
*/
void otSysProcessDrivers(otInstance *aInstance);
/**
* This function is called whenever platform drivers needs processing.
*
* @note This function is not handled by the OpenThread library. Instead, the system/RTOS should handle this function
* and schedule a call to `otSysProcessDrivers()`.
*
*/
extern void otSysEventSignalPending(void);
#ifdef __cplusplus
} // end of extern "C"
#endif
#endif // OPENTHREAD_SYSTEM_H_
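
As a rough sketch of the contract described in the comments above (the tasklet call and the pseudo-reset loop are assumptions drawn from typical OpenThread example applications, not from this commit), a bare-metal main loop would wire these functions together roughly like this:

#include <openthread/instance.h>
#include <openthread/tasklet.h>
#include "openthread-system.h" // the header above

int main(int argc, char * argv[])
{
    while (true)
    {
        otSysInit(argc, argv);                        // platform driver init
        otInstance * instance = otInstanceInitSingle();

        while (!otSysPseudoResetWasRequested())
        {
            otTaskletsProcess(instance);              // run pending OpenThread tasklets
            otSysProcessDrivers(instance);            // service the platform drivers
            // The platform typically sleeps here until otSysEventSignalPending()
            // is invoked from an interrupt or another task.
        }

        otInstanceFinalize(instance);                 // tear down, then loop for the pseudo-reset
        otSysDeinit();
    }
    return 0;
}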

View File

@@ -0,0 +1,51 @@
#ifndef PLATFORM_CHIPDEVICEBUILDCONFIG_H_
#define PLATFORM_CHIPDEVICEBUILDCONFIG_H_
// stack lock tracking config options
#define SL_MATTER_STACK_LOCK_TRACKING_NONE 0
#define SL_MATTER_STACK_LOCK_TRACKING_LOG 1
#define SL_MATTER_STACK_LOCK_TRACKING_FATAL 2
#include "sl_matter_config.h"
// inferred options
#include "sl_component_catalog.h"
// TODO: infer from wifi stack component
#define SL_MATTER_ENABLE_WIFI 0
// TODO: infer from OTA requestor component (split from generated matter efr32 platform component)
#if defined(SILABS_OTA_ENABLED)
#define SL_MATTER_ENABLE_OTA 1
#else
#define SL_MATTER_ENABLE_OTA 0
#endif
#if defined(SL_CATALOG_OPENTHREAD_STACK_PRESENT) || SL_OPENTHREAD_CERT_LIB
#define CHIP_ENABLE_OPENTHREAD 1
#if defined(OPENTHREAD_FTD)
#define CHIP_DEVICE_CONFIG_THREAD_FTD 1
#else
#define CHIP_DEVICE_CONFIG_THREAD_FTD 0
#endif
#endif
#define CHIP_DEVICE_CONFIG_ENABLE_WPA SL_MATTER_ENABLE_WIFI
#define CHIP_WITH_GIO SL_MATTER_ENABLE_WIFI
#define OPENTHREAD_CONFIG_ENABLE_TOBLE 0
//#define CHIP_STACK_LOCK_TRACKING_ENABLED (SL_MATTER_STACK_LOCK_TRACKING_MODE != SL_MATTER_STACK_LOCK_TRACKING_NONE)
#define CHIP_STACK_LOCK_TRACKING_ERROR_FATAL (SL_MATTER_STACK_LOCK_TRACKING_MODE == SL_MATTER_STACK_LOCK_TRACKING_FATAL)
#define CHIP_DEVICE_CONFIG_RUN_AS_ROOT 1
#define CHIP_DISABLE_PLATFORM_KVS 0
#define CHIP_DEVICE_CONFIG_ENABLE_OTA_REQUESTOR SL_MATTER_ENABLE_OTA
#define CHIP_DEVICE_PROJECT_CONFIG_INCLUDE <CHIPProjectConfig.h>
#define CHIP_DEVICE_PLATFORM_CONFIG_INCLUDE <platform/silabs/CHIPDevicePlatformConfig.h>
#define CHIP_DEVICE_LAYER_TARGET_EFR32 1
#define CHIP_DEVICE_LAYER_TARGET silabs
#define CHIP_DEVICE_CONFIG_THREAD_NETWORK_ENDPOINT_ID 0
// Enable default/generic test-mode CommissionableDataProvider in GenericConfigurationManagerImpl
// === FOR TRANSITION UNTIL ALL EXAMPLES PROVIDE THEIR OWN ===
#define CHIP_USE_TRANSITIONAL_COMMISSIONABLE_DATA_PROVIDER 0
#endif // PLATFORM_CHIPDEVICEBUILDCONFIG_H_

View File

@@ -0,0 +1,7 @@
// Generated by write_buildconfig_header.py
// From "//third_party/connectedhomeip/src/setup_payload:gen_additional_data_payload_buildconfig"
#ifndef SETUP_PAYLOAD_CHIPADDITIONALDATAPAYLOADBUILDCONFIG_H_
#define SETUP_PAYLOAD_CHIPADDITIONALDATAPAYLOADBUILDCONFIG_H_
#endif // SETUP_PAYLOAD_CHIPADDITIONALDATAPAYLOADBUILDCONFIG_H_

View File

@@ -0,0 +1,40 @@
// Generated by write_buildconfig_header.py
// From "//third_party/connectedhomeip/src/system:gen_system_buildconfig"
#ifndef SYSTEM_SYSTEMBUILDCONFIG_H_
#define SYSTEM_SYSTEMBUILDCONFIG_H_
#define CONFIG_DEVICE_LAYER 1
#define CHIP_SYSTEM_CONFIG_TEST 0
#define CHIP_WITH_NLFAULTINJECTION 0
#define CHIP_SYSTEM_CONFIG_USE_DISPATCH 0
#ifndef SL_WIFI
#define CHIP_SYSTEM_CONFIG_USE_LWIP 0
#define CHIP_SYSTEM_CONFIG_USE_OPEN_THREAD_ENDPOINT 1
#else
#define CHIP_SYSTEM_CONFIG_USE_LWIP 1
#define CHIP_SYSTEM_CONFIG_USE_OPEN_THREAD_ENDPOINT 0
#endif // SL_WIFI
#define CHIP_SYSTEM_CONFIG_USE_SOCKETS 0
#define CHIP_SYSTEM_CONFIG_USE_NETWORK_FRAMEWORK 0
#define CHIP_SYSTEM_CONFIG_POSIX_LOCKING 0
#define CHIP_SYSTEM_CONFIG_FREERTOS_LOCKING 1
#define CHIP_SYSTEM_CONFIG_MBED_LOCKING 0
#define CHIP_SYSTEM_CONFIG_NO_LOCKING 0
#define CHIP_SYSTEM_CONFIG_PROVIDE_STATISTICS 1
#define HAVE_CLOCK_GETTIME 1
#define HAVE_CLOCK_SETTIME 1
#define HAVE_GETTIMEOFDAY 0
#define HAVE_SYS_TIME_H 1
#define HAVE_NETINET_ICMP6_H 1
#define HAVE_ICMP6_FILTER 1
#define CONFIG_HAVE_VCBPRINTF 0
#define CONFIG_HAVE_VSNPRINTF_EX 0
#define HAVE_SYS_SOCKET_H 0
#define CHIP_PROJECT_CONFIG_INCLUDE <CHIPProjectConfig.h>
#define CHIP_PLATFORM_CONFIG_INCLUDE <platform/silabs/CHIPPlatformConfig.h>
#define SYSTEM_PROJECT_CONFIG_INCLUDE <CHIPProjectConfig.h>
#define SYSTEM_PLATFORM_CONFIG_INCLUDE <platform/silabs/SystemPlatformConfig.h>
#define CHIP_SYSTEM_LAYER_IMPL_CONFIG_FILE <system/SystemLayerImplFreeRTOS.h>
#endif // SYSTEM_SYSTEMBUILDCONFIG_H_

View File

@@ -0,0 +1,374 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <DeviceInfoProviderImpl.h>
#include <lib/core/TLV.h>
#include <lib/support/CHIPMemString.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/DefaultStorageKeyAllocator.h>
#include <lib/support/SafeInt.h>
#include <platform/internal/CHIPDeviceLayerInternal.h>
#include <stdlib.h>
#include <string.h>
#include <cstring>
namespace chip {
namespace DeviceLayer {
namespace {
constexpr TLV::Tag kLabelNameTag = TLV::ContextTag(0);
constexpr TLV::Tag kLabelValueTag = TLV::ContextTag(1);
} // anonymous namespace
DeviceInfoProviderImpl & DeviceInfoProviderImpl::GetDefaultInstance()
{
static DeviceInfoProviderImpl sInstance;
return sInstance;
}
DeviceInfoProvider::FixedLabelIterator * DeviceInfoProviderImpl::IterateFixedLabel(EndpointId endpoint)
{
return chip::Platform::New<FixedLabelIteratorImpl>(endpoint);
}
DeviceInfoProviderImpl::FixedLabelIteratorImpl::FixedLabelIteratorImpl(EndpointId endpoint) : mEndpoint(endpoint)
{
mIndex = 0;
}
size_t DeviceInfoProviderImpl::FixedLabelIteratorImpl::Count()
{
// A hardcoded labelList on all endpoints.
return 4;
}
bool DeviceInfoProviderImpl::FixedLabelIteratorImpl::Next(FixedLabelType & output)
{
bool retval = true;
// A hardcoded list for testing only
CHIP_ERROR err = CHIP_NO_ERROR;
const char * labelPtr = nullptr;
const char * valuePtr = nullptr;
VerifyOrReturnError(mIndex < 4, false);
ChipLogProgress(DeviceLayer, "Get the fixed label with index:%u at endpoint:%d", static_cast<unsigned>(mIndex), mEndpoint);
switch (mIndex)
{
case 0:
labelPtr = "room";
valuePtr = "bedroom 2";
break;
case 1:
labelPtr = "orientation";
valuePtr = "North";
break;
case 2:
labelPtr = "floor";
valuePtr = "2";
break;
case 3:
labelPtr = "direction";
valuePtr = "up";
break;
default:
err = CHIP_ERROR_PERSISTED_STORAGE_VALUE_NOT_FOUND;
break;
}
if (err == CHIP_NO_ERROR)
{
VerifyOrReturnError(std::strlen(labelPtr) <= kMaxLabelNameLength, false);
VerifyOrReturnError(std::strlen(valuePtr) <= kMaxLabelValueLength, false);
Platform::CopyString(mFixedLabelNameBuf, labelPtr);
Platform::CopyString(mFixedLabelValueBuf, valuePtr);
output.label = CharSpan::fromCharString(mFixedLabelNameBuf);
output.value = CharSpan::fromCharString(mFixedLabelValueBuf);
mIndex++;
retval = true;
}
else
{
retval = false;
}
return retval;
}
CHIP_ERROR DeviceInfoProviderImpl::SetUserLabelLength(EndpointId endpoint, size_t val)
{
return mStorage->SyncSetKeyValue(DefaultStorageKeyAllocator::UserLabelLengthKey(endpoint).KeyName(), &val,
static_cast<uint16_t>(sizeof(val)));
}
CHIP_ERROR DeviceInfoProviderImpl::GetUserLabelLength(EndpointId endpoint, size_t & val)
{
uint16_t len = static_cast<uint16_t>(sizeof(val));
return mStorage->SyncGetKeyValue(DefaultStorageKeyAllocator::UserLabelLengthKey(endpoint).KeyName(), &val, len);
}
CHIP_ERROR DeviceInfoProviderImpl::SetUserLabelAt(EndpointId endpoint, size_t index, const UserLabelType & userLabel)
{
VerifyOrReturnError(CanCastTo<uint32_t>(index), CHIP_ERROR_INVALID_ARGUMENT);
uint8_t buf[UserLabelTLVMaxSize()];
TLV::TLVWriter writer;
writer.Init(buf);
TLV::TLVType outerType;
ReturnErrorOnFailure(writer.StartContainer(TLV::AnonymousTag(), TLV::kTLVType_Structure, outerType));
ReturnErrorOnFailure(writer.PutString(kLabelNameTag, userLabel.label));
ReturnErrorOnFailure(writer.PutString(kLabelValueTag, userLabel.value));
ReturnErrorOnFailure(writer.EndContainer(outerType));
return mStorage->SyncSetKeyValue(
DefaultStorageKeyAllocator::UserLabelIndexKey(endpoint, static_cast<uint32_t>(index)).KeyName(), buf,
static_cast<uint16_t>(writer.GetLengthWritten()));
}
CHIP_ERROR DeviceInfoProviderImpl::DeleteUserLabelAt(EndpointId endpoint, size_t index)
{
return mStorage->SyncDeleteKeyValue(
DefaultStorageKeyAllocator::UserLabelIndexKey(endpoint, static_cast<uint32_t>(index)).KeyName());
}
DeviceInfoProvider::UserLabelIterator * DeviceInfoProviderImpl::IterateUserLabel(EndpointId endpoint)
{
return chip::Platform::New<UserLabelIteratorImpl>(*this, endpoint);
}
DeviceInfoProviderImpl::UserLabelIteratorImpl::UserLabelIteratorImpl(DeviceInfoProviderImpl & provider, EndpointId endpoint) :
mProvider(provider), mEndpoint(endpoint)
{
size_t total = 0;
ReturnOnFailure(mProvider.GetUserLabelLength(mEndpoint, total));
mTotal = total;
mIndex = 0;
}
bool DeviceInfoProviderImpl::UserLabelIteratorImpl::Next(UserLabelType & output)
{
CHIP_ERROR err = CHIP_NO_ERROR;
VerifyOrReturnError(mIndex < mTotal, false);
VerifyOrReturnError(CanCastTo<uint32_t>(mIndex), false);
uint8_t buf[UserLabelTLVMaxSize()];
uint16_t len = static_cast<uint16_t>(sizeof(buf));
err = mProvider.mStorage->SyncGetKeyValue(
DefaultStorageKeyAllocator::UserLabelIndexKey(mEndpoint, static_cast<uint32_t>(mIndex)).KeyName(), buf, len);
VerifyOrReturnError(err == CHIP_NO_ERROR, false);
TLV::ContiguousBufferTLVReader reader;
reader.Init(buf);
err = reader.Next(TLV::kTLVType_Structure, TLV::AnonymousTag());
VerifyOrReturnError(err == CHIP_NO_ERROR, false);
TLV::TLVType containerType;
VerifyOrReturnError(reader.EnterContainer(containerType) == CHIP_NO_ERROR, false);
chip::CharSpan label;
chip::CharSpan value;
VerifyOrReturnError(reader.Next(kLabelNameTag) == CHIP_NO_ERROR, false);
VerifyOrReturnError(reader.Get(label) == CHIP_NO_ERROR, false);
VerifyOrReturnError(reader.Next(kLabelValueTag) == CHIP_NO_ERROR, false);
VerifyOrReturnError(reader.Get(value) == CHIP_NO_ERROR, false);
VerifyOrReturnError(reader.VerifyEndOfContainer() == CHIP_NO_ERROR, false);
VerifyOrReturnError(reader.ExitContainer(containerType) == CHIP_NO_ERROR, false);
Platform::CopyString(mUserLabelNameBuf, label);
Platform::CopyString(mUserLabelValueBuf, value);
output.label = CharSpan::fromCharString(mUserLabelNameBuf);
output.value = CharSpan::fromCharString(mUserLabelValueBuf);
mIndex++;
return true;
}
DeviceInfoProvider::SupportedLocalesIterator * DeviceInfoProviderImpl::IterateSupportedLocales()
{
return chip::Platform::New<SupportedLocalesIteratorImpl>();
}
size_t DeviceInfoProviderImpl::SupportedLocalesIteratorImpl::Count()
{
// Hardcoded list of locales
// {("en-US"), ("de-DE"), ("fr-FR"), ("en-GB"), ("es-ES"), ("zh-CN"), ("it-IT"), ("ja-JP")}
return 8;
}
bool DeviceInfoProviderImpl::SupportedLocalesIteratorImpl::Next(CharSpan & output)
{
bool retval = true;
// Hardcoded list of locales
CHIP_ERROR err = CHIP_NO_ERROR;
const char * activeLocalePtr = nullptr;
VerifyOrReturnError(mIndex < 8, false);
switch (mIndex)
{
case 0:
activeLocalePtr = "en-US";
break;
case 1:
activeLocalePtr = "de-DE";
break;
case 2:
activeLocalePtr = "fr-FR";
break;
case 3:
activeLocalePtr = "en-GB";
break;
case 4:
activeLocalePtr = "es-ES";
break;
case 5:
activeLocalePtr = "zh-CN";
break;
case 6:
activeLocalePtr = "it-IT";
break;
case 7:
activeLocalePtr = "ja-JP";
break;
default:
err = CHIP_ERROR_PERSISTED_STORAGE_VALUE_NOT_FOUND;
break;
}
if (err == CHIP_NO_ERROR)
{
VerifyOrReturnError(std::strlen(activeLocalePtr) <= kMaxActiveLocaleLength, false);
Platform::CopyString(mActiveLocaleBuf, kMaxActiveLocaleLength + 1, activeLocalePtr);
output = CharSpan::fromCharString(mActiveLocaleBuf);
mIndex++;
retval = true;
}
else
{
retval = false;
}
return retval;
}
DeviceInfoProvider::SupportedCalendarTypesIterator * DeviceInfoProviderImpl::IterateSupportedCalendarTypes()
{
return chip::Platform::New<SupportedCalendarTypesIteratorImpl>();
}
size_t DeviceInfoProviderImpl::SupportedCalendarTypesIteratorImpl::Count()
{
// Hardcoded list of strings
// {("kBuddhist"), ("kChinese"), ("kCoptic"), ("kEthiopian"), ("kGregorian"), ("kHebrew"), ("kIndian"), ("kJapanese"),
// ("kKorean"), ("kPersian"), ("kTaiwanese"), ("kIslamic")}
return 12;
}
bool DeviceInfoProviderImpl::SupportedCalendarTypesIteratorImpl::Next(CalendarType & output)
{
bool retval = true;
// Hardcoded list of Strings that are valid values for the Calendar Types.
CHIP_ERROR err = CHIP_NO_ERROR;
VerifyOrReturnError(mIndex < 12, false);
switch (mIndex)
{
case 0:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kBuddhist;
break;
case 1:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kChinese;
break;
case 2:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kCoptic;
break;
case 3:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kEthiopian;
break;
case 4:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kGregorian;
break;
case 5:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kHebrew;
break;
case 6:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kIndian;
break;
case 7:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kJapanese;
break;
case 8:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kKorean;
break;
case 9:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kPersian;
break;
case 10:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kTaiwanese;
break;
case 11:
output = app::Clusters::TimeFormatLocalization::CalendarTypeEnum::kIslamic;
break;
default:
err = CHIP_ERROR_PERSISTED_STORAGE_VALUE_NOT_FOUND;
break;
}
if (err == CHIP_NO_ERROR)
{
mIndex++;
retval = true;
}
else
{
retval = false;
}
return retval;
}
} // namespace DeviceLayer
} // namespace chip
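
A small consumer sketch (not part of the file above; endpoint 1 and the function name are illustrative, and the same includes as the file above are assumed) showing the Count/Next/Release contract the iterators implement:

void PrintFixedLabels(chip::DeviceLayer::DeviceInfoProvider & provider)
{
    auto * it = provider.IterateFixedLabel(/* endpoint = */ 1);
    VerifyOrReturn(it != nullptr);

    chip::DeviceLayer::DeviceInfoProvider::FixedLabelType label;
    while (it->Next(label))
    {
        ChipLogProgress(DeviceLayer, "fixed label '%.*s' = '%.*s'",
                        static_cast<int>(label.label.size()), label.label.data(),
                        static_cast<int>(label.value.size()), label.value.data());
    }

    // Iterators are allocated with Platform::New, so callers must Release() them.
    it->Release();
}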

View File

@@ -0,0 +1,107 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <lib/support/EnforceFormat.h>
#include <platform/DeviceInfoProvider.h>
namespace chip {
namespace DeviceLayer {
class DeviceInfoProviderImpl : public DeviceInfoProvider
{
public:
DeviceInfoProviderImpl() = default;
~DeviceInfoProviderImpl() override {}
// Iterators
FixedLabelIterator * IterateFixedLabel(EndpointId endpoint) override;
UserLabelIterator * IterateUserLabel(EndpointId endpoint) override;
SupportedLocalesIterator * IterateSupportedLocales() override;
SupportedCalendarTypesIterator * IterateSupportedCalendarTypes() override;
static DeviceInfoProviderImpl & GetDefaultInstance();
protected:
class FixedLabelIteratorImpl : public FixedLabelIterator
{
public:
FixedLabelIteratorImpl(EndpointId endpoint);
size_t Count() override;
bool Next(FixedLabelType & output) override;
void Release() override { chip::Platform::Delete(this); }
private:
EndpointId mEndpoint = 0;
size_t mIndex = 0;
char mFixedLabelNameBuf[kMaxLabelNameLength + 1];
char mFixedLabelValueBuf[kMaxLabelValueLength + 1];
};
class UserLabelIteratorImpl : public UserLabelIterator
{
public:
UserLabelIteratorImpl(DeviceInfoProviderImpl & provider, EndpointId endpoint);
size_t Count() override { return mTotal; }
bool Next(UserLabelType & output) override;
void Release() override { chip::Platform::Delete(this); }
private:
DeviceInfoProviderImpl & mProvider;
EndpointId mEndpoint = 0;
size_t mIndex = 0;
size_t mTotal = 0;
char mUserLabelNameBuf[kMaxLabelNameLength + 1];
char mUserLabelValueBuf[kMaxLabelValueLength + 1];
};
class SupportedLocalesIteratorImpl : public SupportedLocalesIterator
{
public:
SupportedLocalesIteratorImpl() = default;
size_t Count() override;
bool Next(CharSpan & output) override;
void Release() override { chip::Platform::Delete(this); }
private:
size_t mIndex = 0;
char mActiveLocaleBuf[kMaxActiveLocaleLength + 1];
};
class SupportedCalendarTypesIteratorImpl : public SupportedCalendarTypesIterator
{
public:
SupportedCalendarTypesIteratorImpl() = default;
size_t Count() override;
bool Next(CalendarType & output) override;
void Release() override { chip::Platform::Delete(this); }
private:
size_t mIndex = 0;
};
CHIP_ERROR SetUserLabelLength(EndpointId endpoint, size_t val) override;
CHIP_ERROR GetUserLabelLength(EndpointId endpoint, size_t & val) override;
CHIP_ERROR SetUserLabelAt(EndpointId endpoint, size_t index, const UserLabelType & userLabel) override;
CHIP_ERROR DeleteUserLabelAt(EndpointId endpoint, size_t index) override;
private:
static constexpr size_t UserLabelTLVMaxSize() { return TLV::EstimateStructOverhead(kMaxLabelNameLength, kMaxLabelValueLength); }
};
} // namespace DeviceLayer
} // namespace chip
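
Typical application wiring (an assumption about app-side usage relying on the SetStorageDelegate and SetDeviceInfoProvider hooks from platform/DeviceInfoProvider.h; nothing in this commit shows it) installs the implementation as the global device info provider once persistent storage is available:

#include <DeviceInfoProviderImpl.h>
#include <platform/DeviceInfoProvider.h>

void InitDeviceInfoProvider(chip::PersistentStorageDelegate & storage)
{
    auto & provider = chip::DeviceLayer::DeviceInfoProviderImpl::GetDefaultInstance();
    provider.SetStorageDelegate(&storage);                 // backs the user-label APIs
    chip::DeviceLayer::SetDeviceInfoProvider(&provider);   // make it globally visible
}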

View File

@@ -0,0 +1,72 @@
/*
*
* Copyright (c) 2020 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <lib/shell/Engine.h>
#include <crypto/RandUtils.h>
#include <lib/core/CHIPCore.h>
#include <lib/support/Base64.h>
#include <lib/support/CHIPArgParser.hpp>
#include <lib/support/CodeUtils.h>
#include <inttypes.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ChipShellCollection.h>
using namespace chip;
using namespace chip::Shell;
using namespace chip::Logging;
CHIP_ERROR cmd_echo(int argc, char ** argv)
{
for (int i = 0; i < argc; i++)
{
streamer_printf(streamer_get(), "%s ", argv[i]);
}
streamer_printf(streamer_get(), "\n\r");
return CHIP_NO_ERROR;
}
CHIP_ERROR cmd_log(int argc, char ** argv)
{
for (int i = 0; i < argc; i++)
{
ChipLogProgress(chipTool, "%s", argv[i]);
}
return CHIP_NO_ERROR;
}
CHIP_ERROR cmd_rand(int argc, char ** argv)
{
streamer_printf(streamer_get(), "%d\n\r", static_cast<int>(chip::Crypto::GetRandU8()));
return CHIP_NO_ERROR;
}
static shell_command_t cmds_misc[] = {
{ &cmd_echo, "echo", "Echo back provided inputs" },
{ &cmd_log, "log", "Logging utilities" },
{ &cmd_rand, "rand", "Random number utilities" },
};
void cmd_misc_init()
{
Engine::Root().RegisterCommands(cmds_misc, ArraySize(cmds_misc));
}
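
The same registration pattern extends to application-defined commands; a hypothetical "hello" command (not part of this commit) would follow the file above like this:

static CHIP_ERROR cmd_hello(int argc, char ** argv)
{
    streamer_printf(streamer_get(), "hello from chip-shell\n\r");
    return CHIP_NO_ERROR;
}

static shell_command_t sHelloCommand = { &cmd_hello, "hello", "Print a greeting" };

void cmd_hello_init()
{
    chip::Shell::Engine::Root().RegisterCommands(&sHelloCommand, 1);
}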

View File

@@ -0,0 +1,204 @@
/*
*
* Copyright (c) 2020 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <lib/core/CHIPCore.h>
#include <ChipShellCollection.h>
#if CONFIG_DEVICE_LAYER
#include <platform/CHIPDeviceLayer.h>
#endif
#if CHIP_ENABLE_OPENTHREAD
#include <stdio.h>
#include <lib/shell/Engine.h>
#include <lib/support/CHIPArgParser.hpp>
#include <lib/support/CHIPMem.h>
#include <lib/support/CodeUtils.h>
#include <platform/ThreadStackManager.h>
#if CHIP_TARGET_STYLE_EMBEDDED
#include <openthread/cli.h>
#include <openthread/instance.h>
#include <openthread/ip6.h>
#include <openthread/link.h>
#include <openthread/thread.h>
#if OPENTHREAD_API_VERSION >= 85
#if !CHIP_DEVICE_CONFIG_THREAD_ENABLE_CLI
#ifndef SHELL_OTCLI_TX_BUFFER_SIZE
#define SHELL_OTCLI_TX_BUFFER_SIZE 1024
#endif
static char sTxBuffer[SHELL_OTCLI_TX_BUFFER_SIZE];
static constexpr uint16_t sTxLength = SHELL_OTCLI_TX_BUFFER_SIZE;
#endif // !CHIP_DEVICE_CONFIG_THREAD_ENABLE_CLI
#endif
static constexpr uint16_t kMaxLineLength = 384;
#else
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#endif
using namespace chip;
using namespace chip::Shell;
using namespace chip::Platform;
using namespace chip::DeviceLayer;
using namespace chip::Logging;
using namespace chip::ArgParser;
static chip::Shell::Engine sShellOtcliSubcommands;
CHIP_ERROR cmd_otcli_help_iterator(shell_command_t * command, void * arg)
{
streamer_printf(streamer_get(), " %-15s %s\n\r", command->cmd_name, command->cmd_help);
return CHIP_NO_ERROR;
}
CHIP_ERROR cmd_otcli_help(int argc, char ** argv)
{
sShellOtcliSubcommands.ForEachCommand(cmd_otcli_help_iterator, nullptr);
return CHIP_NO_ERROR;
}
#if CHIP_TARGET_STYLE_EMBEDDED
CHIP_ERROR cmd_otcli_dispatch(int argc, char ** argv)
{
CHIP_ERROR error = CHIP_NO_ERROR;
char buff[kMaxLineLength] = { 0 };
char * buff_ptr = buff;
int i = 0;
VerifyOrExit(argc > 0, error = CHIP_ERROR_INVALID_ARGUMENT);
for (i = 0; i < argc; i++)
{
size_t arg_len = strlen(argv[i]);
/* Make sure that the next argument won't overflow the buffer */
VerifyOrExit(buff_ptr + arg_len < buff + kMaxLineLength, error = CHIP_ERROR_BUFFER_TOO_SMALL);
strncpy(buff_ptr, argv[i], arg_len);
buff_ptr += arg_len;
/* Make sure that there is enough buffer for a space char */
if (buff_ptr + sizeof(char) < buff + kMaxLineLength)
{
strncpy(buff_ptr, " ", sizeof(char));
buff_ptr++;
}
}
*buff_ptr = '\0'; // Null-terminate the assembled command line
chip::DeviceLayer::ThreadStackMgr().LockThreadStack();
#if OPENTHREAD_API_VERSION >= 85
otCliInputLine(buff);
#else
otCliConsoleInputLine(buff, buff_ptr - buff);
#endif
chip::DeviceLayer::ThreadStackMgr().UnlockThreadStack();
exit:
return error;
}
#elif CHIP_TARGET_STYLE_UNIX
CHIP_ERROR cmd_otcli_dispatch(int argc, char ** argv)
{
int pid;
uid_t euid = geteuid();
char ctl_command[] = "/usr/local/sbin/ot-ctl";
// Must run as sudo.
if (euid != 0)
{
streamer_printf(streamer_get(), "Error otcli: requires running chip-shell as sudo\n\r");
return CHIP_ERROR_INCORRECT_STATE;
}
VerifyOrReturnError(argc > 0, CHIP_ERROR_INVALID_ARGUMENT);
// Fork and execute the command.
pid = fork();
VerifyOrReturnError(pid != -1, CHIP_ERROR_INCORRECT_STATE);
if (pid == 0)
{
// Child process to execute the command with provided arguments
--argv; // Restore access to entry [0] containing the command;
argv[0] = ctl_command;
if (execvp(ctl_command, argv) < 0)
{
streamer_printf(streamer_get(), "Error exec %s: %s\n", ctl_command, strerror(errno));
}
exit(errno);
}
else
{
// Parent process to wait on child.
int status;
wait(&status);
return (status) ? CHIP_ERROR_INCORRECT_STATE : CHIP_NO_ERROR;
}
}
#endif // CHIP_TARGET_STYLE_UNIX
static const shell_command_t cmds_otcli_root = { &cmd_otcli_dispatch, "otcli", "Dispatch OpenThread CLI command" };
#if CHIP_TARGET_STYLE_EMBEDDED
#if OPENTHREAD_API_VERSION >= 85
#if !CHIP_DEVICE_CONFIG_THREAD_ENABLE_CLI
static int OnOtCliOutput(void * aContext, const char * aFormat, va_list aArguments)
{
int rval = vsnprintf(sTxBuffer, sTxLength, aFormat, aArguments);
VerifyOrExit(rval >= 0 && rval < sTxLength, rval = CHIP_ERROR_BUFFER_TOO_SMALL.AsInteger());
return streamer_write(streamer_get(), (const char *) sTxBuffer, rval);
exit:
return rval;
}
#endif // !CHIP_DEVICE_CONFIG_THREAD_ENABLE_CLI
#else
static int OnOtCliOutput(const char * aBuf, uint16_t aBufLength, void * aContext)
{
return streamer_write(streamer_get(), aBuf, aBufLength);
}
#endif
#endif
#endif // CHIP_ENABLE_OPENTHREAD
void cmd_otcli_init()
{
#if CHIP_ENABLE_OPENTHREAD
#if CHIP_TARGET_STYLE_EMBEDDED
#if !CHIP_DEVICE_CONFIG_THREAD_ENABLE_CLI
#if OPENTHREAD_API_VERSION >= 85
otCliInit(otInstanceInitSingle(), &OnOtCliOutput, NULL);
#else
otCliConsoleInit(otInstanceInitSingle(), &OnOtCliOutput, NULL);
#endif // OPENTHREAD_API_VERSION >= 85
#endif // !CHIP_DEVICE_CONFIG_THREAD_ENABLE_CLI
#endif // CHIP_TARGET_STYLE_EMBEDDED
// Register the root otcli command with the top-level shell.
Engine::Root().RegisterCommands(&cmds_otcli_root, 1);
#endif // CHIP_ENABLE_OPENTHREAD
}

View File

@@ -0,0 +1,33 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <Globals.h>
chip::FabricTable gFabricTable;
chip::secure_channel::MessageCounterManager gMessageCounterManager;
chip::Messaging::ExchangeManager gExchangeManager;
chip::SessionManager gSessionManager;
chip::Inet::IPAddress gDestAddr;
chip::SessionHolder gSession;
chip::TestPersistentStorageDelegate gStorage;
chip::FabricIndex gFabricIndex = 0;
#if INET_CONFIG_ENABLE_TCP_ENDPOINT
chip::TransportMgr<chip::Transport::TCP<kMaxTcpActiveConnectionCount, kMaxTcpPendingPackets>> gTCPManager;
#endif
chip::TransportMgr<chip::Transport::UDP> gUDPManager;

View File

@@ -0,0 +1,23 @@
/*
*
* Copyright (c) 2020 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
// A list of shell commands provided by ChipShell
void cmd_misc_init(void);
void cmd_otcli_init(void);
void cmd_app_server_init(void);

View File

@@ -0,0 +1,51 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <credentials/FabricTable.h>
#include <lib/core/CHIPCore.h>
#include <lib/support/TestPersistentStorageDelegate.h>
#include <messaging/ExchangeMgr.h>
#include <protocols/secure_channel/MessageCounterManager.h>
#include <transport/SessionHolder.h>
#include <transport/SessionManager.h>
#if INET_CONFIG_ENABLE_TCP_ENDPOINT
#include <transport/raw/TCP.h>
#endif // INET_CONFIG_ENABLE_TCP_ENDPOINT
#include <transport/raw/UDP.h>
#if INET_CONFIG_ENABLE_TCP_ENDPOINT
inline constexpr size_t kMaxTcpActiveConnectionCount = 4;
inline constexpr size_t kMaxTcpPendingPackets = 4;
#endif
inline constexpr chip::System::Clock::Timeout kResponseTimeOut = chip::System::Clock::Seconds16(1);
extern chip::FabricTable gFabricTable;
extern chip::secure_channel::MessageCounterManager gMessageCounterManager;
extern chip::Messaging::ExchangeManager gExchangeManager;
extern chip::SessionManager gSessionManager;
extern chip::Inet::IPAddress gDestAddr;
extern chip::SessionHolder gSession;
extern chip::TestPersistentStorageDelegate gStorage;
extern chip::FabricIndex gFabricIndex;
#if INET_CONFIG_ENABLE_TCP_ENDPOINT
extern chip::TransportMgr<chip::Transport::TCP<kMaxTcpActiveConnectionCount, kMaxTcpPendingPackets>> gTCPManager;
#endif
extern chip::TransportMgr<chip::Transport::UDP> gUDPManager;

View File

@@ -0,0 +1,22 @@
/*
*
* Copyright (c) 2024 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#if CHIP_HAVE_CONFIG_H
#include <access/AccessBuildConfig.h>
#endif

View File

@@ -0,0 +1,729 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Included for the default AccessControlDelegate logging enables/disables.
// See `chip_access_control_policy_logging_verbosity` in `src/app/BUILD.gn` for
// the levels available.
#include <app/AppConfig.h>
#include "AccessControl.h"
#include <lib/core/Global.h>
namespace chip {
namespace Access {
using chip::CATValues;
using chip::FabricIndex;
using chip::NodeId;
namespace {
Global<AccessControl> defaultAccessControl;
AccessControl * globalAccessControl = nullptr; // lazily defaulted to defaultAccessControl in GetAccessControl
static_assert(((unsigned(Privilege::kAdminister) & unsigned(Privilege::kManage)) == 0) &&
((unsigned(Privilege::kAdminister) & unsigned(Privilege::kOperate)) == 0) &&
((unsigned(Privilege::kAdminister) & unsigned(Privilege::kView)) == 0) &&
((unsigned(Privilege::kAdminister) & unsigned(Privilege::kProxyView)) == 0) &&
((unsigned(Privilege::kManage) & unsigned(Privilege::kOperate)) == 0) &&
((unsigned(Privilege::kManage) & unsigned(Privilege::kView)) == 0) &&
((unsigned(Privilege::kManage) & unsigned(Privilege::kProxyView)) == 0) &&
((unsigned(Privilege::kOperate) & unsigned(Privilege::kView)) == 0) &&
((unsigned(Privilege::kOperate) & unsigned(Privilege::kProxyView)) == 0) &&
((unsigned(Privilege::kView) & unsigned(Privilege::kProxyView)) == 0),
"Privilege bits must be unique");
bool CheckRequestPrivilegeAgainstEntryPrivilege(Privilege requestPrivilege, Privilege entryPrivilege)
{
switch (entryPrivilege)
{
case Privilege::kView:
return requestPrivilege == Privilege::kView;
case Privilege::kProxyView:
return requestPrivilege == Privilege::kProxyView || requestPrivilege == Privilege::kView;
case Privilege::kOperate:
return requestPrivilege == Privilege::kOperate || requestPrivilege == Privilege::kView;
case Privilege::kManage:
return requestPrivilege == Privilege::kManage || requestPrivilege == Privilege::kOperate ||
requestPrivilege == Privilege::kView;
case Privilege::kAdminister:
return requestPrivilege == Privilege::kAdminister || requestPrivilege == Privilege::kManage ||
requestPrivilege == Privilege::kOperate || requestPrivilege == Privilege::kView ||
requestPrivilege == Privilege::kProxyView;
}
return false;
}
constexpr bool IsValidCaseNodeId(NodeId aNodeId)
{
if (IsOperationalNodeId(aNodeId))
{
return true;
}
if (IsCASEAuthTag(aNodeId) && (GetCASEAuthTagVersion(CASEAuthTagFromNodeId(aNodeId)) != 0))
{
return true;
}
return false;
}
constexpr bool IsValidGroupNodeId(NodeId aNodeId)
{
return IsGroupId(aNodeId) && IsValidGroupId(GroupIdFromNodeId(aNodeId));
}
#if CHIP_PROGRESS_LOGGING && CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
char GetAuthModeStringForLogging(AuthMode authMode)
{
switch (authMode)
{
case AuthMode::kNone:
return 'n';
case AuthMode::kPase:
return 'p';
case AuthMode::kCase:
return 'c';
case AuthMode::kGroup:
return 'g';
}
return 'u';
}
constexpr int kCharsPerCatForLogging = 11; // including final null terminator
char * GetCatStringForLogging(char * buf, size_t size, const CATValues & cats)
{
if (size == 0)
{
return nullptr;
}
char * p = buf;
char * const end = buf + size;
*p = '\0';
// Format string chars needed:
// 1 for comma (optional)
// 2 for 0x prefix
// 8 for 32-bit hex value
// 1 for null terminator (at end)
static constexpr char fmtWithoutComma[] = "0x%08" PRIX32;
static constexpr char fmtWithComma[] = ",0x%08" PRIX32;
constexpr int countWithoutComma = 10;
constexpr int countWithComma = countWithoutComma + 1;
bool withComma = false;
for (auto cat : cats.values)
{
if (cat == chip::kUndefinedCAT)
{
break;
}
snprintf(p, static_cast<size_t>(end - p), withComma ? fmtWithComma : fmtWithoutComma, cat);
p += withComma ? countWithComma : countWithoutComma;
if (p >= end)
{
// Output was truncated.
p = end - ((size < 4) ? size : 4);
while (*p)
{
// Indicate truncation if possible.
*p++ = '.';
}
break;
}
withComma = true;
}
return buf;
}
char GetPrivilegeStringForLogging(Privilege privilege)
{
switch (privilege)
{
case Privilege::kView:
return 'v';
case Privilege::kProxyView:
return 'p';
case Privilege::kOperate:
return 'o';
case Privilege::kManage:
return 'm';
case Privilege::kAdminister:
return 'a';
}
return 'u';
}
char GetRequestTypeStringForLogging(RequestType requestType)
{
switch (requestType)
{
case RequestType::kAttributeReadRequest:
return 'r';
case RequestType::kAttributeWriteRequest:
return 'w';
case RequestType::kCommandInvokeRequest:
return 'i';
case RequestType::kEventReadRequest:
return 'e';
default:
return '?';
}
}
#endif // CHIP_PROGRESS_LOGGING && CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
} // namespace
Global<AccessControl::Entry::Delegate> AccessControl::Entry::mDefaultDelegate;
Global<AccessControl::EntryIterator::Delegate> AccessControl::EntryIterator::mDefaultDelegate;
CHIP_ERROR AccessControl::Init(AccessControl::Delegate * delegate, DeviceTypeResolver & deviceTypeResolver)
{
VerifyOrReturnError(!IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
ChipLogProgress(DataManagement, "AccessControl: initializing");
VerifyOrReturnError(delegate != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
CHIP_ERROR retval = delegate->Init();
if (retval == CHIP_NO_ERROR)
{
mDelegate = delegate;
mDeviceTypeResolver = &deviceTypeResolver;
}
return retval;
}
void AccessControl::Finish()
{
VerifyOrReturn(IsInitialized());
ChipLogProgress(DataManagement, "AccessControl: finishing");
mDelegate->Finish();
mDelegate = nullptr;
}
CHIP_ERROR AccessControl::CreateEntry(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t * index,
const Entry & entry)
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
size_t count = 0;
size_t maxCount = 0;
ReturnErrorOnFailure(mDelegate->GetEntryCount(fabric, count));
ReturnErrorOnFailure(mDelegate->GetMaxEntriesPerFabric(maxCount));
VerifyOrReturnError((count + 1) <= maxCount, CHIP_ERROR_BUFFER_TOO_SMALL);
ReturnErrorCodeIf(!IsValid(entry), CHIP_ERROR_INVALID_ARGUMENT);
size_t i = 0;
ReturnErrorOnFailure(mDelegate->CreateEntry(&i, entry, &fabric));
if (index)
{
*index = i;
}
NotifyEntryChanged(subjectDescriptor, fabric, i, &entry, EntryListener::ChangeType::kAdded);
return CHIP_NO_ERROR;
}
CHIP_ERROR AccessControl::UpdateEntry(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t index,
const Entry & entry)
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
ReturnErrorCodeIf(!IsValid(entry), CHIP_ERROR_INVALID_ARGUMENT);
ReturnErrorOnFailure(mDelegate->UpdateEntry(index, entry, &fabric));
NotifyEntryChanged(subjectDescriptor, fabric, index, &entry, EntryListener::ChangeType::kUpdated);
return CHIP_NO_ERROR;
}
CHIP_ERROR AccessControl::DeleteEntry(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t index)
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
Entry entry;
Entry * p = nullptr;
if (mEntryListener != nullptr && ReadEntry(fabric, index, entry) == CHIP_NO_ERROR)
{
p = &entry;
}
ReturnErrorOnFailure(mDelegate->DeleteEntry(index, &fabric));
if (p && p->HasDefaultDelegate())
{
// The entry was read prior to deletion so its latest value could be provided
// to the listener after deletion. If it's been reset to its default delegate,
// that best effort attempt to retain the latest value failed. This is
// regrettable but OK.
p = nullptr;
}
NotifyEntryChanged(subjectDescriptor, fabric, index, p, EntryListener::ChangeType::kRemoved);
return CHIP_NO_ERROR;
}
void AccessControl::AddEntryListener(EntryListener & listener)
{
if (mEntryListener == nullptr)
{
mEntryListener = &listener;
listener.mNext = nullptr;
return;
}
for (EntryListener * l = mEntryListener; /**/; l = l->mNext)
{
if (l == &listener)
{
return;
}
if (l->mNext == nullptr)
{
l->mNext = &listener;
listener.mNext = nullptr;
return;
}
}
}
void AccessControl::RemoveEntryListener(EntryListener & listener)
{
if (mEntryListener == &listener)
{
mEntryListener = listener.mNext;
listener.mNext = nullptr;
return;
}
for (EntryListener * l = mEntryListener; l != nullptr; l = l->mNext)
{
if (l->mNext == &listener)
{
l->mNext = listener.mNext;
listener.mNext = nullptr;
return;
}
}
}
bool AccessControl::IsAccessRestrictionListSupported() const
{
#if CHIP_CONFIG_USE_ACCESS_RESTRICTIONS
return mAccessRestrictionProvider != nullptr;
#else
return false;
#endif
}
CHIP_ERROR AccessControl::Check(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath,
Privilege requestPrivilege)
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
CHIP_ERROR result = CheckACL(subjectDescriptor, requestPath, requestPrivilege);
#if CHIP_CONFIG_USE_ACCESS_RESTRICTIONS
if (result == CHIP_NO_ERROR)
{
result = CheckARL(subjectDescriptor, requestPath, requestPrivilege);
}
#endif
return result;
}
CHIP_ERROR AccessControl::CheckACL(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath,
Privilege requestPrivilege)
{
#if CHIP_PROGRESS_LOGGING && CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
{
constexpr size_t kMaxCatsToLog = 6;
char catLogBuf[kMaxCatsToLog * kCharsPerCatForLogging];
ChipLogProgress(DataManagement,
"AccessControl: checking f=%u a=%c s=0x" ChipLogFormatX64 " t=%s c=" ChipLogFormatMEI " e=%u p=%c r=%c",
subjectDescriptor.fabricIndex, GetAuthModeStringForLogging(subjectDescriptor.authMode),
ChipLogValueX64(subjectDescriptor.subject),
GetCatStringForLogging(catLogBuf, sizeof(catLogBuf), subjectDescriptor.cats),
ChipLogValueMEI(requestPath.cluster), requestPath.endpoint, GetPrivilegeStringForLogging(requestPrivilege),
GetRequestTypeStringForLogging(requestPath.requestType));
}
#endif // CHIP_PROGRESS_LOGGING && CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
{
CHIP_ERROR result = mDelegate->Check(subjectDescriptor, requestPath, requestPrivilege);
if (result != CHIP_ERROR_NOT_IMPLEMENTED)
{
#if CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 0
ChipLogProgress(DataManagement, "AccessControl: %s (delegate)",
(result == CHIP_NO_ERROR) ? "allowed"
: (result == CHIP_ERROR_ACCESS_DENIED) ? "denied"
: "error");
#else
if (result != CHIP_NO_ERROR)
{
ChipLogProgress(DataManagement, "AccessControl: %s (delegate)",
(result == CHIP_ERROR_ACCESS_DENIED) ? "denied" : "error");
}
#endif // CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 0
return result;
}
}
// Operational PASE not supported for v1.0, so PASE implies commissioning, which has highest privilege.
// Currently, subject descriptor is only PASE if this node is the responder (aka commissionee);
// if this node is the initiator (aka commissioner) then the subject descriptor remains blank.
if (subjectDescriptor.authMode == AuthMode::kPase)
{
#if CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
ChipLogProgress(DataManagement, "AccessControl: implicit admin (PASE)");
#endif // CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
return CHIP_NO_ERROR;
}
EntryIterator iterator;
ReturnErrorOnFailure(Entries(iterator, &subjectDescriptor.fabricIndex));
Entry entry;
while (iterator.Next(entry) == CHIP_NO_ERROR)
{
AuthMode authMode = AuthMode::kNone;
ReturnErrorOnFailure(entry.GetAuthMode(authMode));
// Operational PASE not supported for v1.0.
VerifyOrReturnError(authMode == AuthMode::kCase || authMode == AuthMode::kGroup, CHIP_ERROR_INCORRECT_STATE);
if (authMode != subjectDescriptor.authMode)
{
continue;
}
Privilege privilege = Privilege::kView;
ReturnErrorOnFailure(entry.GetPrivilege(privilege));
if (!CheckRequestPrivilegeAgainstEntryPrivilege(requestPrivilege, privilege))
{
continue;
}
size_t subjectCount = 0;
ReturnErrorOnFailure(entry.GetSubjectCount(subjectCount));
if (subjectCount > 0)
{
bool subjectMatched = false;
for (size_t i = 0; i < subjectCount; ++i)
{
NodeId subject = kUndefinedNodeId;
ReturnErrorOnFailure(entry.GetSubject(i, subject));
if (IsOperationalNodeId(subject))
{
VerifyOrReturnError(authMode == AuthMode::kCase, CHIP_ERROR_INCORRECT_STATE);
if (subject == subjectDescriptor.subject)
{
subjectMatched = true;
break;
}
}
else if (IsCASEAuthTag(subject))
{
VerifyOrReturnError(authMode == AuthMode::kCase, CHIP_ERROR_INCORRECT_STATE);
if (subjectDescriptor.cats.CheckSubjectAgainstCATs(subject))
{
subjectMatched = true;
break;
}
}
else if (IsGroupId(subject))
{
VerifyOrReturnError(authMode == AuthMode::kGroup, CHIP_ERROR_INCORRECT_STATE);
if (subject == subjectDescriptor.subject)
{
subjectMatched = true;
break;
}
}
else
{
// Operational PASE not supported for v1.0.
return CHIP_ERROR_INCORRECT_STATE;
}
}
if (!subjectMatched)
{
continue;
}
}
size_t targetCount = 0;
ReturnErrorOnFailure(entry.GetTargetCount(targetCount));
if (targetCount > 0)
{
bool targetMatched = false;
for (size_t i = 0; i < targetCount; ++i)
{
Entry::Target target;
ReturnErrorOnFailure(entry.GetTarget(i, target));
if ((target.flags & Entry::Target::kCluster) && target.cluster != requestPath.cluster)
{
continue;
}
if ((target.flags & Entry::Target::kEndpoint) && target.endpoint != requestPath.endpoint)
{
continue;
}
if (target.flags & Entry::Target::kDeviceType &&
!mDeviceTypeResolver->IsDeviceTypeOnEndpoint(target.deviceType, requestPath.endpoint))
{
continue;
}
targetMatched = true;
break;
}
if (!targetMatched)
{
continue;
}
}
// Entry passed all checks: access is allowed.
#if CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 0
ChipLogProgress(DataManagement, "AccessControl: allowed");
#endif // CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 0
return CHIP_NO_ERROR;
}
// No entry was found which passed all checks: access is denied.
ChipLogProgress(DataManagement, "AccessControl: denied");
return CHIP_ERROR_ACCESS_DENIED;
}
#if CHIP_CONFIG_USE_ACCESS_RESTRICTIONS
CHIP_ERROR AccessControl::CheckARL(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath,
Privilege requestPrivilege)
{
CHIP_ERROR result = CHIP_NO_ERROR;
VerifyOrReturnError(requestPath.requestType != RequestType::kRequestTypeUnknown, CHIP_ERROR_INVALID_ARGUMENT);
if (!IsAccessRestrictionListSupported())
{
// Access Restriction support is compiled in, but not configured/enabled. Nothing to restrict.
return CHIP_NO_ERROR;
}
if (subjectDescriptor.isCommissioning)
{
result = mAccessRestrictionProvider->CheckForCommissioning(subjectDescriptor, requestPath);
}
else
{
result = mAccessRestrictionProvider->Check(subjectDescriptor, requestPath);
}
if (result != CHIP_NO_ERROR)
{
ChipLogProgress(DataManagement, "AccessControl: %s",
(result == CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL) ? "denied (restricted)" : "denied (restriction error)");
return result;
}
return result;
}
#endif
#if CHIP_ACCESS_CONTROL_DUMP_ENABLED
CHIP_ERROR AccessControl::Dump(const Entry & entry)
{
CHIP_ERROR err;
ChipLogDetail(DataManagement, "----- BEGIN ENTRY -----");
{
FabricIndex fabricIndex;
SuccessOrExit(err = entry.GetFabricIndex(fabricIndex));
ChipLogDetail(DataManagement, "fabricIndex: %u", fabricIndex);
}
{
Privilege privilege;
SuccessOrExit(err = entry.GetPrivilege(privilege));
ChipLogDetail(DataManagement, "privilege: %d", to_underlying(privilege));
}
{
AuthMode authMode;
SuccessOrExit(err = entry.GetAuthMode(authMode));
ChipLogDetail(DataManagement, "authMode: %d", to_underlying(authMode));
}
{
size_t count;
SuccessOrExit(err = entry.GetSubjectCount(count));
if (count)
{
ChipLogDetail(DataManagement, "subjects: %u", static_cast<unsigned>(count));
for (size_t i = 0; i < count; ++i)
{
NodeId subject;
SuccessOrExit(err = entry.GetSubject(i, subject));
ChipLogDetail(DataManagement, " %u: 0x" ChipLogFormatX64, static_cast<unsigned>(i), ChipLogValueX64(subject));
}
}
}
{
size_t count;
SuccessOrExit(err = entry.GetTargetCount(count));
if (count)
{
ChipLogDetail(DataManagement, "targets: %u", static_cast<unsigned>(count));
for (size_t i = 0; i < count; ++i)
{
Entry::Target target;
SuccessOrExit(err = entry.GetTarget(i, target));
if (target.flags & Entry::Target::kCluster)
{
ChipLogDetail(DataManagement, " %u: cluster: 0x" ChipLogFormatMEI, static_cast<unsigned>(i),
ChipLogValueMEI(target.cluster));
}
if (target.flags & Entry::Target::kEndpoint)
{
ChipLogDetail(DataManagement, " %u: endpoint: %u", static_cast<unsigned>(i), target.endpoint);
}
if (target.flags & Entry::Target::kDeviceType)
{
ChipLogDetail(DataManagement, " %u: deviceType: 0x" ChipLogFormatMEI, static_cast<unsigned>(i),
ChipLogValueMEI(target.deviceType));
}
}
}
}
ChipLogDetail(DataManagement, "----- END ENTRY -----");
return CHIP_NO_ERROR;
exit:
ChipLogError(DataManagement, "AccessControl: dump failed %" CHIP_ERROR_FORMAT, err.Format());
return err;
}
#endif
bool AccessControl::IsValid(const Entry & entry)
{
const char * log = "unexpected error";
IgnoreUnusedVariable(log); // logging may be disabled
AuthMode authMode = AuthMode::kNone;
FabricIndex fabricIndex = kUndefinedFabricIndex;
Privilege privilege = static_cast<Privilege>(0);
size_t subjectCount = 0;
size_t targetCount = 0;
CHIP_ERROR err = CHIP_NO_ERROR;
SuccessOrExit(err = entry.GetAuthMode(authMode));
SuccessOrExit(err = entry.GetFabricIndex(fabricIndex));
SuccessOrExit(err = entry.GetPrivilege(privilege));
SuccessOrExit(err = entry.GetSubjectCount(subjectCount));
SuccessOrExit(err = entry.GetTargetCount(targetCount));
#if CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
ChipLogProgress(DataManagement, "AccessControl: validating f=%u p=%c a=%c s=%d t=%d", fabricIndex,
GetPrivilegeStringForLogging(privilege), GetAuthModeStringForLogging(authMode), static_cast<int>(subjectCount),
static_cast<int>(targetCount));
#endif // CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
// Fabric index must be defined.
VerifyOrExit(fabricIndex != kUndefinedFabricIndex, log = "invalid fabric index");
if (authMode != AuthMode::kCase)
{
// Operational PASE not supported for v1.0 (so must be group).
VerifyOrExit(authMode == AuthMode::kGroup, log = "invalid auth mode");
// Privilege must not be administer.
VerifyOrExit(privilege != Privilege::kAdminister, log = "invalid privilege");
}
for (size_t i = 0; i < subjectCount; ++i)
{
NodeId subject;
SuccessOrExit(err = entry.GetSubject(i, subject));
const bool kIsCase = authMode == AuthMode::kCase;
const bool kIsGroup = authMode == AuthMode::kGroup;
#if CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
ChipLogProgress(DataManagement, " validating subject 0x" ChipLogFormatX64, ChipLogValueX64(subject));
#endif // CHIP_CONFIG_ACCESS_CONTROL_POLICY_LOGGING_VERBOSITY > 1
VerifyOrExit((kIsCase && IsValidCaseNodeId(subject)) || (kIsGroup && IsValidGroupNodeId(subject)), log = "invalid subject");
}
for (size_t i = 0; i < targetCount; ++i)
{
Entry::Target target;
SuccessOrExit(err = entry.GetTarget(i, target));
const bool kHasCluster = target.flags & Entry::Target::kCluster;
const bool kHasEndpoint = target.flags & Entry::Target::kEndpoint;
const bool kHasDeviceType = target.flags & Entry::Target::kDeviceType;
VerifyOrExit((kHasCluster || kHasEndpoint || kHasDeviceType) && !(kHasEndpoint && kHasDeviceType) &&
(!kHasCluster || IsValidClusterId(target.cluster)) &&
(!kHasEndpoint || IsValidEndpointId(target.endpoint)) &&
(!kHasDeviceType || IsValidDeviceTypeId(target.deviceType)),
log = "invalid target");
}
return true;
exit:
if (err != CHIP_NO_ERROR)
{
ChipLogError(DataManagement, "AccessControl: %s %" CHIP_ERROR_FORMAT, log, err.Format());
}
else
{
ChipLogError(DataManagement, "AccessControl: %s", log);
}
return false;
}
void AccessControl::NotifyEntryChanged(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t index,
const Entry * entry, EntryListener::ChangeType changeType)
{
for (EntryListener * listener = mEntryListener; listener != nullptr; listener = listener->mNext)
{
listener->OnEntryChanged(subjectDescriptor, fabric, index, entry, changeType);
}
}
AccessControl & GetAccessControl()
{
return (globalAccessControl) ? *globalAccessControl : defaultAccessControl.get();
}
void SetAccessControl(AccessControl & accessControl)
{
ChipLogProgress(DataManagement, "AccessControl: setting");
globalAccessControl = &accessControl;
}
void ResetAccessControlToDefault()
{
globalAccessControl = nullptr;
}
} // namespace Access
} // namespace chip

View File

@@ -0,0 +1,724 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <access/AccessConfig.h>
#if CHIP_CONFIG_USE_ACCESS_RESTRICTIONS
#include "AccessRestrictionProvider.h"
#endif
#include "Privilege.h"
#include "RequestPath.h"
#include "SubjectDescriptor.h"
#include <lib/core/CHIPCore.h>
#include <lib/core/Global.h>
#include <lib/support/CodeUtils.h>
// Dump function for use during development only (0 for disabled, non-zero for enabled).
#define CHIP_ACCESS_CONTROL_DUMP_ENABLED 0
namespace chip {
namespace Access {
class AccessControl
{
public:
/**
* Used by access control to determine if a device type resolves to an endpoint.
*/
struct DeviceTypeResolver
{
public:
virtual ~DeviceTypeResolver() = default;
virtual bool IsDeviceTypeOnEndpoint(DeviceTypeId deviceType, EndpointId endpoint) = 0;
};
/**
* Handle to an entry in the access control list.
*
* Must be prepared (`AccessControl::PrepareEntry`) or read (`AccessControl::ReadEntry`) before first use.
*/
class Entry
{
public:
struct Target
{
using Flags = unsigned;
static constexpr Flags kCluster = 1 << 0;
static constexpr Flags kEndpoint = 1 << 1;
static constexpr Flags kDeviceType = 1 << 2;
Flags flags = 0;
ClusterId cluster;
EndpointId endpoint;
DeviceTypeId deviceType;
};
class Delegate
{
public:
Delegate() = default;
Delegate(const Delegate &) = delete;
Delegate & operator=(const Delegate &) = delete;
virtual ~Delegate() = default;
virtual void Release() {}
// Simple getters
virtual CHIP_ERROR GetAuthMode(AuthMode & authMode) const { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR GetFabricIndex(FabricIndex & fabricIndex) const { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR GetPrivilege(Privilege & privilege) const { return CHIP_ERROR_NOT_IMPLEMENTED; }
// Simple setters
virtual CHIP_ERROR SetAuthMode(AuthMode authMode) { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR SetFabricIndex(FabricIndex fabricIndex) { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR SetPrivilege(Privilege privilege) { return CHIP_ERROR_NOT_IMPLEMENTED; }
// Subjects
virtual CHIP_ERROR GetSubjectCount(size_t & count) const { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR GetSubject(size_t index, NodeId & subject) const { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR SetSubject(size_t index, NodeId subject) { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR AddSubject(size_t * index, NodeId subject) { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR RemoveSubject(size_t index) { return CHIP_ERROR_NOT_IMPLEMENTED; }
// Targets
virtual CHIP_ERROR GetTargetCount(size_t & count) const { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR GetTarget(size_t index, Target & target) const { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR SetTarget(size_t index, const Target & target) { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR AddTarget(size_t * index, const Target & target) { return CHIP_ERROR_NOT_IMPLEMENTED; }
virtual CHIP_ERROR RemoveTarget(size_t index) { return CHIP_ERROR_NOT_IMPLEMENTED; }
};
Entry() = default;
Entry(Entry && other) : mDelegate(other.mDelegate) { other.mDelegate = &mDefaultDelegate.get(); }
Entry & operator=(Entry && other)
{
if (this != &other)
{
mDelegate->Release();
mDelegate = other.mDelegate;
other.mDelegate = &mDefaultDelegate.get();
}
return *this;
}
Entry(const Entry &) = delete;
Entry & operator=(const Entry &) = delete;
~Entry() { mDelegate->Release(); }
// Simple getters
CHIP_ERROR GetAuthMode(AuthMode & authMode) const { return mDelegate->GetAuthMode(authMode); }
CHIP_ERROR GetFabricIndex(FabricIndex & fabricIndex) const { return mDelegate->GetFabricIndex(fabricIndex); }
CHIP_ERROR GetPrivilege(Privilege & privilege) const { return mDelegate->GetPrivilege(privilege); }
// Simple setters
CHIP_ERROR SetAuthMode(AuthMode authMode) { return mDelegate->SetAuthMode(authMode); }
CHIP_ERROR SetFabricIndex(FabricIndex fabricIndex) { return mDelegate->SetFabricIndex(fabricIndex); }
CHIP_ERROR SetPrivilege(Privilege privilege) { return mDelegate->SetPrivilege(privilege); }
/**
* Gets the number of subjects.
*
* @param [out] count The number of subjects.
*/
CHIP_ERROR GetSubjectCount(size_t & count) const { return mDelegate->GetSubjectCount(count); }
/**
* Gets the specified subject.
*
* @param [in] index The index of the subject to get.
* @param [out] subject The subject into which to get.
*/
CHIP_ERROR GetSubject(size_t index, NodeId & subject) const { return mDelegate->GetSubject(index, subject); }
/**
* Sets the specified subject.
*
* @param [in] index The index of the subject to set.
* @param [in] subject The subject from which to set.
*/
CHIP_ERROR SetSubject(size_t index, NodeId subject) { return mDelegate->SetSubject(index, subject); }
/**
* Adds the specified subject.
*
* @param [out] index The index of the added subject, if not null.
* @param [in] subject The subject to add.
*/
CHIP_ERROR AddSubject(size_t * index, NodeId subject) { return mDelegate->AddSubject(index, subject); }
/**
* Removes the specified subject.
*
* @param [in] index The index of the subject to delete.
*/
CHIP_ERROR RemoveSubject(size_t index) { return mDelegate->RemoveSubject(index); }
/**
* Gets the number of targets.
*
* @param [out] count The number of targets.
*/
CHIP_ERROR GetTargetCount(size_t & count) const { return mDelegate->GetTargetCount(count); }
/**
* Gets the specified target.
*
* @param [in] index The index of the target to get.
* @param [out] target The target into which to get.
*/
CHIP_ERROR GetTarget(size_t index, Target & target) const { return mDelegate->GetTarget(index, target); }
/**
* Sets the specified target.
*
* @param [in] index The index of the target to set.
* @param [in] target The target from which to set.
*/
CHIP_ERROR SetTarget(size_t index, const Target & target) { return mDelegate->SetTarget(index, target); }
/**
* Adds the specified target.
*
* @param [out] index The index of the added target, if not null.
* @param [in] target The target to add.
*/
CHIP_ERROR AddTarget(size_t * index, const Target & target) { return mDelegate->AddTarget(index, target); }
/**
* Removes the specified target.
*
* @param [in] index The index of the target to delete.
*/
CHIP_ERROR RemoveTarget(size_t index) { return mDelegate->RemoveTarget(index); }
bool HasDefaultDelegate() const { return mDelegate == &mDefaultDelegate.get(); }
const Delegate & GetDelegate() const { return *mDelegate; }
Delegate & GetDelegate() { return *mDelegate; }
void SetDelegate(Delegate & delegate)
{
mDelegate->Release();
mDelegate = &delegate;
}
void ResetDelegate()
{
mDelegate->Release();
mDelegate = &mDefaultDelegate.get();
}
private:
static Global<Delegate> mDefaultDelegate;
Delegate * mDelegate = &mDefaultDelegate.get();
};
/**
* Handle to an entry iterator in the access control list.
*
* Must be initialized (`AccessControl::Entries`) before first use.
*/
class EntryIterator
{
public:
class Delegate
{
public:
Delegate() = default;
Delegate(const Delegate &) = delete;
Delegate & operator=(const Delegate &) = delete;
virtual ~Delegate() = default;
virtual void Release() {}
virtual CHIP_ERROR Next(Entry & entry) { return CHIP_ERROR_SENTINEL; }
};
EntryIterator() = default;
EntryIterator(const EntryIterator &) = delete;
EntryIterator & operator=(const EntryIterator &) = delete;
~EntryIterator() { mDelegate->Release(); }
CHIP_ERROR Next(Entry & entry) { return mDelegate->Next(entry); }
const Delegate & GetDelegate() const { return *mDelegate; }
Delegate & GetDelegate() { return *mDelegate; }
void SetDelegate(Delegate & delegate)
{
mDelegate->Release();
mDelegate = &delegate;
}
void ResetDelegate()
{
mDelegate->Release();
mDelegate = &mDefaultDelegate.get();
}
private:
static Global<Delegate> mDefaultDelegate;
Delegate * mDelegate = &mDefaultDelegate.get();
};
/**
* Used by access control to notify of changes in access control list.
*/
class EntryListener
{
public:
enum class ChangeType
{
kAdded = 1,
kRemoved = 2,
kUpdated = 3
};
virtual ~EntryListener() = default;
/**
* Notifies of a change in the access control list.
*
* The fabric is indicated by its own parameter. If available, a subject descriptor will
* have more detail (and its fabric index will match). A best effort is made to provide
* the latest value of the changed entry.
*
* @param [in] subjectDescriptor Optional (if available) subject descriptor for this operation.
* @param [in] fabric Index of fabric in which entry has changed.
* @param [in] index Index of the entry which has changed (relative to fabric).
* @param [in] entry Optional (best effort) latest value of entry which has changed.
* @param [in] changeType Type of change.
*/
virtual void OnEntryChanged(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t index,
const Entry * entry, ChangeType changeType) = 0;
private:
EntryListener * mNext = nullptr;
friend class AccessControl;
};
class Delegate
{
public:
Delegate() = default;
Delegate(const Delegate &) = delete;
Delegate & operator=(const Delegate &) = delete;
virtual ~Delegate() = default;
virtual void Release() {}
virtual CHIP_ERROR Init() { return CHIP_NO_ERROR; }
virtual void Finish() {}
// Capabilities
virtual CHIP_ERROR GetMaxEntriesPerFabric(size_t & value) const
{
value = 0;
return CHIP_NO_ERROR;
}
virtual CHIP_ERROR GetMaxSubjectsPerEntry(size_t & value) const
{
value = 0;
return CHIP_NO_ERROR;
}
virtual CHIP_ERROR GetMaxTargetsPerEntry(size_t & value) const
{
value = 0;
return CHIP_NO_ERROR;
}
virtual CHIP_ERROR GetMaxEntryCount(size_t & value) const
{
value = 0;
return CHIP_NO_ERROR;
}
// Actualities
virtual CHIP_ERROR GetEntryCount(FabricIndex fabric, size_t & value) const
{
value = 0;
return CHIP_NO_ERROR;
}
virtual CHIP_ERROR GetEntryCount(size_t & value) const
{
value = 0;
return CHIP_NO_ERROR;
}
// Preparation
virtual CHIP_ERROR PrepareEntry(Entry & entry) { return CHIP_NO_ERROR; }
// CRUD
virtual CHIP_ERROR CreateEntry(size_t * index, const Entry & entry, FabricIndex * fabricIndex) { return CHIP_NO_ERROR; }
virtual CHIP_ERROR ReadEntry(size_t index, Entry & entry, const FabricIndex * fabricIndex) const { return CHIP_NO_ERROR; }
virtual CHIP_ERROR UpdateEntry(size_t index, const Entry & entry, const FabricIndex * fabricIndex) { return CHIP_NO_ERROR; }
virtual CHIP_ERROR DeleteEntry(size_t index, const FabricIndex * fabricIndex) { return CHIP_NO_ERROR; }
// Iteration
virtual CHIP_ERROR Entries(EntryIterator & iterator, const FabricIndex * fabricIndex) const { return CHIP_NO_ERROR; }
// Check
// Return CHIP_NO_ERROR if allowed, CHIP_ERROR_ACCESS_DENIED if denied,
// CHIP_ERROR_NOT_IMPLEMENTED to use the default check algorithm (against entries),
// or any other CHIP_ERROR if another error occurred.
virtual CHIP_ERROR Check(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath,
Privilege requestPrivilege)
{
return CHIP_ERROR_ACCESS_DENIED;
}
};
AccessControl() = default;
AccessControl(const AccessControl &) = delete;
AccessControl & operator=(const AccessControl &) = delete;
~AccessControl()
{
// Never-initialized AccessControl instances will not have the delegate set.
if (IsInitialized())
{
mDelegate->Release();
}
}
/**
* Initialize the access control module. Must be called before first use.
*
* @return CHIP_NO_ERROR on success, CHIP_ERROR_INCORRECT_STATE if called more than once,
* CHIP_ERROR_INVALID_ARGUMENT if delegate is null, or other fatal error.
*/
CHIP_ERROR Init(AccessControl::Delegate * delegate, DeviceTypeResolver & deviceTypeResolver);
/**
* Deinitialize the access control module. Must be called when finished.
*/
void Finish();
// Capabilities
CHIP_ERROR GetMaxEntriesPerFabric(size_t & value) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->GetMaxEntriesPerFabric(value);
}
CHIP_ERROR GetMaxSubjectsPerEntry(size_t & value) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->GetMaxSubjectsPerEntry(value);
}
CHIP_ERROR GetMaxTargetsPerEntry(size_t & value) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->GetMaxTargetsPerEntry(value);
}
CHIP_ERROR GetMaxEntryCount(size_t & value) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->GetMaxEntryCount(value);
}
// Actualities
CHIP_ERROR GetEntryCount(FabricIndex fabric, size_t & value) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->GetEntryCount(fabric, value);
}
CHIP_ERROR GetEntryCount(size_t & value) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->GetEntryCount(value);
}
/**
* Prepares an entry.
*
* An entry must be prepared or read (`ReadEntry`) before first use.
*
* @param [in] entry Entry to prepare.
*/
CHIP_ERROR PrepareEntry(Entry & entry)
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->PrepareEntry(entry);
}
/**
* Creates an entry in the access control list.
*
* @param [in] subjectDescriptor Optional subject descriptor for this operation.
* @param [in] fabric Index of fabric in which to create entry.
* @param [out] index (If not nullptr) index of created entry (relative to fabric).
* @param [in] entry Entry from which created entry is copied.
*/
CHIP_ERROR CreateEntry(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t * index, const Entry & entry);
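/**
* One possible usage sketch: preparing and creating an entry that grants Administer
* privilege to a single CASE node. The fabric index (1) and node id used here are
* hypothetical placeholders; error handling is reduced to ReturnErrorOnFailure.
*
* @code
* AccessControl::Entry entry;
* ReturnErrorOnFailure(GetAccessControl().PrepareEntry(entry));
* ReturnErrorOnFailure(entry.SetFabricIndex(1));
* ReturnErrorOnFailure(entry.SetAuthMode(AuthMode::kCase));
* ReturnErrorOnFailure(entry.SetPrivilege(Privilege::kAdminister));
* ReturnErrorOnFailure(entry.AddSubject(nullptr, 0x1122334455667788ULL)); // hypothetical operational node id
* size_t index = 0;
* ReturnErrorOnFailure(GetAccessControl().CreateEntry(nullptr, 1, &index, entry));
* @endcode
*/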
/**
* Creates an entry in the access control list.
*
* @param [out] index Entry index of created entry, if not null. May be relative to `fabricIndex`.
* @param [in] entry Entry from which to copy.
* @param [out] fabricIndex Fabric index of created entry, if not null, in which case entry `index` will be relative to fabric.
*/
CHIP_ERROR CreateEntry(size_t * index, const Entry & entry, FabricIndex * fabricIndex = nullptr)
{
ReturnErrorCodeIf(!IsValid(entry), CHIP_ERROR_INVALID_ARGUMENT);
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->CreateEntry(index, entry, fabricIndex);
}
/**
* Reads an entry in the access control list.
*
* @param [in] fabric Index of fabric in which to read entry.
* @param [in] index Index of entry to read (relative to fabric).
* @param [in] entry Entry into which read entry is copied.
*/
CHIP_ERROR ReadEntry(FabricIndex fabric, size_t index, Entry & entry) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->ReadEntry(index, entry, &fabric);
}
/**
* Reads an entry from the access control list.
*
* @param [in] index Entry index of entry to read. May be relative to `fabricIndex`.
* @param [out] entry Entry into which to copy.
* @param [in] fabricIndex Fabric to which entry `index` is relative, if not null.
*/
CHIP_ERROR ReadEntry(size_t index, Entry & entry, const FabricIndex * fabricIndex = nullptr) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->ReadEntry(index, entry, fabricIndex);
}
/**
* Updates an entry in the access control list.
*
* @param [in] subjectDescriptor Optional subject descriptor for this operation.
* @param [in] fabric Index of fabric in which to update entry.
* @param [in] index Index of entry to update (relative to fabric).
* @param [in] entry Entry from which updated entry is copied.
*/
CHIP_ERROR UpdateEntry(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t index, const Entry & entry);
/**
* Updates an entry in the access control list.
*
* @param [in] index Entry index of entry to update. May be relative to `fabricIndex`.
* @param [in] entry Entry from which to copy.
* @param [in] fabricIndex Fabric to which entry `index` is relative, if not null.
*/
CHIP_ERROR UpdateEntry(size_t index, const Entry & entry, const FabricIndex * fabricIndex = nullptr)
{
ReturnErrorCodeIf(!IsValid(entry), CHIP_ERROR_INVALID_ARGUMENT);
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->UpdateEntry(index, entry, fabricIndex);
}
/**
* Deletes an entry in the access control list.
*
* @param [in] subjectDescriptor Optional subject descriptor for this operation.
* @param [in] fabric Index of fabric in which to delete entry.
* @param [in] index Index of entry to delete (relative to fabric).
*/
CHIP_ERROR DeleteEntry(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t index);
/**
* Deletes an entry from the access control list.
*
* @param [in] index Entry index of entry to delete. May be relative to `fabricIndex`.
* @param [in] fabricIndex Fabric to which entry `index` is relative, if not null.
*/
CHIP_ERROR DeleteEntry(size_t index, const FabricIndex * fabricIndex = nullptr)
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->DeleteEntry(index, fabricIndex);
}
/**
* @brief Remove all ACL entries for the given fabricIndex
*
* @param[in] fabricIndex fabric index for which to remove all entries
*/
CHIP_ERROR DeleteAllEntriesForFabric(FabricIndex fabricIndex)
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
CHIP_ERROR stickyError = CHIP_NO_ERROR;
// Remove access control entries in reverse order (it could be any order, but reverse order
// will cause less churn in persistent storage).
size_t aclCount = 0;
if (GetEntryCount(fabricIndex, aclCount) == CHIP_NO_ERROR)
{
while (aclCount)
{
CHIP_ERROR err = DeleteEntry(nullptr, fabricIndex, --aclCount);
stickyError = (stickyError == CHIP_NO_ERROR) ? err : stickyError;
}
}
return stickyError;
}
/**
* Iterates over entries in the access control list.
*
* @param [in] fabric Fabric over which to iterate entries.
* @param [out] iterator Iterator controlling the iteration.
*/
CHIP_ERROR Entries(FabricIndex fabric, EntryIterator & iterator) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->Entries(iterator, &fabric);
}
/**
* Iterates over entries in the access control list.
*
* @param [out] iterator Iterator controlling the iteration.
* @param [in] fabricIndex Iteration is confined to fabric, if not null.
*/
CHIP_ERROR Entries(EntryIterator & iterator, const FabricIndex * fabricIndex = nullptr) const
{
VerifyOrReturnError(IsInitialized(), CHIP_ERROR_INCORRECT_STATE);
return mDelegate->Entries(iterator, fabricIndex);
}
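/**
* One possible usage sketch: iterating the entries of a single fabric and inspecting
* each entry, mirroring the pattern CheckACL uses internally. The fabric index (1) is
* a hypothetical placeholder.
*
* @code
* AccessControl::EntryIterator iterator;
* ReturnErrorOnFailure(GetAccessControl().Entries(1, iterator));
* AccessControl::Entry entry;
* while (iterator.Next(entry) == CHIP_NO_ERROR)
* {
*     Privilege privilege = Privilege::kView;
*     ReturnErrorOnFailure(entry.GetPrivilege(privilege));
*     // ... inspect subjects and targets as needed ...
* }
* @endcode
*/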
// Adds a listener to the end of the listener list, if not already in the list.
void AddEntryListener(EntryListener & listener);
// Removes a listener from the listener list, if in the list.
void RemoveEntryListener(EntryListener & listener);
#if CHIP_CONFIG_USE_ACCESS_RESTRICTIONS
// Set an optional AccessRestrictionProvider object for the MNGD feature.
void SetAccessRestrictionProvider(AccessRestrictionProvider * accessRestrictionProvider)
{
mAccessRestrictionProvider = accessRestrictionProvider;
}
AccessRestrictionProvider * GetAccessRestrictionProvider() { return mAccessRestrictionProvider; }
#endif
/**
* Check whether or not Access Restriction List is supported.
*
* @retval true if Access Restriction List is supported.
*/
bool IsAccessRestrictionListSupported() const;
/**
* Check whether access (by a subject descriptor, to a request path,
* requiring a privilege) should be allowed or denied.
*
* If an AccessRestrictionProvider object is set, it will be checked for additional access restrictions.
*
* @retval #CHIP_ERROR_ACCESS_DENIED if denied.
* @retval other errors should also be treated as denied.
* @retval #CHIP_NO_ERROR if allowed.
*/
CHIP_ERROR Check(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath, Privilege requestPrivilege);
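/**
* One possible usage sketch: checking whether a CASE subject may read an attribute.
* In practice the subject descriptor comes from the secure session; the fabric index,
* node id, cluster id, endpoint, and attribute id below are hypothetical placeholders.
*
* @code
* SubjectDescriptor subjectDescriptor;
* subjectDescriptor.fabricIndex = 1;
* subjectDescriptor.authMode    = AuthMode::kCase;
* subjectDescriptor.subject     = 0x1122334455667788ULL;
* RequestPath requestPath;
* requestPath.cluster     = 0x00000006;
* requestPath.endpoint    = 1;
* requestPath.requestType = RequestType::kAttributeReadRequest;
* requestPath.entityId    = 0x00000000;
* CHIP_ERROR err = GetAccessControl().Check(subjectDescriptor, requestPath, Privilege::kView);
* // CHIP_NO_ERROR means allowed; CHIP_ERROR_ACCESS_DENIED and any other error must be treated as denied.
* @endcode
*/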
#if CHIP_ACCESS_CONTROL_DUMP_ENABLED
CHIP_ERROR Dump(const Entry & entry);
#endif
private:
bool IsInitialized() const { return (mDelegate != nullptr); }
bool IsValid(const Entry & entry);
void NotifyEntryChanged(const SubjectDescriptor * subjectDescriptor, FabricIndex fabric, size_t index, const Entry * entry,
EntryListener::ChangeType changeType);
/**
* Check ACL for whether access (by a subject descriptor, to a request path,
* requiring a privilege) should be allowed or denied.
*/
CHIP_ERROR CheckACL(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath, Privilege requestPrivilege);
/**
* Check CommissioningARL or ARL (as appropriate) for whether access (by a
* subject descriptor, to a request path, requiring a privilege) should
* be allowed or denied.
*/
CHIP_ERROR CheckARL(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath, Privilege requestPrivilege);
private:
Delegate * mDelegate = nullptr;
DeviceTypeResolver * mDeviceTypeResolver = nullptr;
EntryListener * mEntryListener = nullptr;
#if CHIP_CONFIG_USE_ACCESS_RESTRICTIONS
AccessRestrictionProvider * mAccessRestrictionProvider = nullptr;
#endif
};
/**
* Get the global instance set by SetAccessControl, or the default.
*
* Calls to this function must be synchronized externally.
*/
AccessControl & GetAccessControl();
/**
* Set the global instance returned by GetAccessControl.
*
* Calls to this function must be synchronized externally.
*/
void SetAccessControl(AccessControl & accessControl);
/**
* Reset the global instance to the default.
*
* Calls to this function must be synchronized externally.
*/
void ResetAccessControlToDefault();
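/**
* One possible startup/shutdown sketch. ExampleAclDelegate and ExampleDeviceTypeResolver
* are hypothetical application-provided implementations of AccessControl::Delegate and
* AccessControl::DeviceTypeResolver.
*
* @code
* static ExampleAclDelegate sAclDelegate;
* static ExampleDeviceTypeResolver sDeviceTypeResolver;
* static AccessControl sAccessControl;
* ReturnErrorOnFailure(sAccessControl.Init(&sAclDelegate, sDeviceTypeResolver));
* SetAccessControl(sAccessControl);
* // ... later, during shutdown ...
* ResetAccessControlToDefault();
* sAccessControl.Finish();
* @endcode
*/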
} // namespace Access
} // namespace chip

View File

@@ -0,0 +1,235 @@
/*
*
* Copyright (c) 2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "AccessRestrictionProvider.h"
#include <algorithm>
#include <lib/core/Global.h>
using namespace chip::Platform;
namespace chip {
namespace Access {
void AccessRestrictionProvider::AddListener(Listener & listener)
{
if (mListeners == nullptr)
{
mListeners = &listener;
listener.mNext = nullptr;
return;
}
for (Listener * l = mListeners; /**/; l = l->mNext)
{
if (l == &listener)
{
return;
}
if (l->mNext == nullptr)
{
l->mNext = &listener;
listener.mNext = nullptr;
return;
}
}
}
void AccessRestrictionProvider::RemoveListener(Listener & listener)
{
if (mListeners == &listener)
{
mListeners = listener.mNext;
listener.mNext = nullptr;
return;
}
for (Listener * l = mListeners; l != nullptr; l = l->mNext)
{
if (l->mNext == &listener)
{
l->mNext = listener.mNext;
listener.mNext = nullptr;
return;
}
}
}
CHIP_ERROR AccessRestrictionProvider::SetCommissioningEntries(const std::vector<Entry> & entries)
{
for (auto & entry : entries)
{
if (!mExceptionChecker.AreRestrictionsAllowed(entry.endpointNumber, entry.clusterId))
{
ChipLogError(DataManagement, "AccessRestrictionProvider: invalid entry");
return CHIP_ERROR_INVALID_ARGUMENT;
}
}
mCommissioningEntries = entries;
for (Listener * listener = mListeners; listener != nullptr; listener = listener->mNext)
{
listener->MarkCommissioningRestrictionListChanged();
}
return CHIP_NO_ERROR;
}
CHIP_ERROR AccessRestrictionProvider::SetEntries(const FabricIndex fabricIndex, const std::vector<Entry> & entries)
{
std::vector<Entry> updatedEntries;
for (auto & entry : entries)
{
if (!mExceptionChecker.AreRestrictionsAllowed(entry.endpointNumber, entry.clusterId))
{
ChipLogError(DataManagement, "AccessRestrictionProvider: invalid entry");
return CHIP_ERROR_INVALID_ARGUMENT;
}
Entry updatedEntry = entry;
updatedEntry.fabricIndex = fabricIndex;
updatedEntries.push_back(updatedEntry);
}
mFabricEntries[fabricIndex] = std::move(updatedEntries);
for (Listener * listener = mListeners; listener != nullptr; listener = listener->mNext)
{
listener->MarkRestrictionListChanged(fabricIndex);
}
return CHIP_NO_ERROR;
}
bool AccessRestrictionProvider::StandardAccessRestrictionExceptionChecker::AreRestrictionsAllowed(EndpointId endpoint,
ClusterId cluster)
{
if (endpoint != kRootEndpointId &&
(cluster == app::Clusters::WiFiNetworkManagement::Id || cluster == app::Clusters::ThreadBorderRouterManagement::Id ||
cluster == app::Clusters::ThreadNetworkDirectory::Id))
{
return true;
}
return false;
}
CHIP_ERROR AccessRestrictionProvider::CheckForCommissioning(const SubjectDescriptor & subjectDescriptor,
const RequestPath & requestPath)
{
return DoCheck(mCommissioningEntries, subjectDescriptor, requestPath);
}
CHIP_ERROR AccessRestrictionProvider::Check(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath)
{
return DoCheck(mFabricEntries[subjectDescriptor.fabricIndex], subjectDescriptor, requestPath);
}
CHIP_ERROR AccessRestrictionProvider::DoCheck(const std::vector<Entry> & entries, const SubjectDescriptor & subjectDescriptor,
const RequestPath & requestPath)
{
if (!mExceptionChecker.AreRestrictionsAllowed(requestPath.endpoint, requestPath.cluster))
{
ChipLogProgress(DataManagement, "AccessRestrictionProvider: skipping checks for unrestrictable request path");
return CHIP_NO_ERROR;
}
ChipLogProgress(DataManagement, "AccessRestrictionProvider: action %d", to_underlying(requestPath.requestType));
if (requestPath.requestType == RequestType::kRequestTypeUnknown)
{
ChipLogError(DataManagement, "AccessRestrictionProvider: RequestPath type is unknown");
return CHIP_ERROR_INVALID_ARGUMENT;
}
// Wildcard event subscriptions are allowed, since the wildcard is only used when setting up the subscription and
// we want that request to succeed (when generating the report, this method will be called with the specific
// event id). All other requests require an entity id.
if (!requestPath.entityId.has_value())
{
if (requestPath.requestType == RequestType::kEventReadRequest)
{
return CHIP_NO_ERROR;
}
else
{
return CHIP_ERROR_INVALID_ARGUMENT;
}
}
for (auto & entry : entries)
{
if (entry.endpointNumber != requestPath.endpoint || entry.clusterId != requestPath.cluster)
{
continue;
}
for (auto & restriction : entry.restrictions)
{
// A missing id is a wildcard
bool idMatch = !restriction.id.HasValue() || restriction.id.Value() == requestPath.entityId.value();
if (!idMatch)
{
continue;
}
switch (restriction.restrictionType)
{
case Type::kAttributeAccessForbidden:
if (requestPath.requestType == RequestType::kAttributeReadRequest ||
requestPath.requestType == RequestType::kAttributeWriteRequest)
{
if (!IsGlobalAttribute(requestPath.entityId.value()))
{
return CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL;
}
}
break;
case Type::kAttributeWriteForbidden:
if (requestPath.requestType == RequestType::kAttributeWriteRequest)
{
if (!IsGlobalAttribute(requestPath.entityId.value()))
{
return CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL;
}
}
break;
case Type::kCommandForbidden:
if (requestPath.requestType == RequestType::kCommandInvokeRequest)
{
return CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL;
}
break;
case Type::kEventForbidden:
if (requestPath.requestType == RequestType::kEventReadRequest)
{
return CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL;
}
break;
}
}
}
return CHIP_NO_ERROR;
}
} // namespace Access
} // namespace chip

View File

@@ -0,0 +1,275 @@
/*
*
* Copyright (c) 2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "Privilege.h"
#include "RequestPath.h"
#include "SubjectDescriptor.h"
#include <algorithm>
#include <app-common/zap-generated/cluster-objects.h>
#include <cstdint>
#include <lib/core/CHIPError.h>
#include <lib/core/DataModelTypes.h>
#include <lib/core/Optional.h>
#include <lib/support/CHIPMem.h>
#include <map>
#include <memory>
#include <protocols/interaction_model/Constants.h>
#include <vector>
namespace chip {
namespace Access {
class AccessRestrictionProvider
{
public:
static constexpr size_t kNumberOfFabrics = CHIP_CONFIG_MAX_FABRICS;
static constexpr size_t kEntriesPerFabric = CHIP_CONFIG_ACCESS_RESTRICTION_MAX_ENTRIES_PER_FABRIC;
static constexpr size_t kRestrictionsPerEntry = CHIP_CONFIG_ACCESS_RESTRICTION_MAX_RESTRICTIONS_PER_ENTRY;
/**
* Defines the type of access restriction, which is used to determine the meaning of the restriction's id.
*/
enum class Type : uint8_t
{
kAttributeAccessForbidden = 0,
kAttributeWriteForbidden = 1,
kCommandForbidden = 2,
kEventForbidden = 3
};
/**
* Defines a single restriction on an attribute, command, or event.
*
* If id is not set, the restriction applies to all attributes, commands, or events of the given type (wildcard).
*/
struct Restriction
{
Type restrictionType;
Optional<uint32_t> id;
};
/**
* Defines a single entry in the access restriction list, which contains a list of restrictions
* for a cluster on an endpoint.
*/
struct Entry
{
FabricIndex fabricIndex;
EndpointId endpointNumber;
ClusterId clusterId;
std::vector<Restriction> restrictions;
};
/**
* Defines the interface for a checker for access restriction exceptions.
*/
class AccessRestrictionExceptionChecker
{
public:
virtual ~AccessRestrictionExceptionChecker() = default;
/**
* Check whether restrictions are allowed to be applied to the given endpoint and cluster,
* given the constraints on which endpoints and clusters may appear in ARLs.
*
* @retval true if ARL checks are allowed to be applied to the cluster on the endpoint, false otherwise
*/
virtual bool AreRestrictionsAllowed(EndpointId endpoint, ClusterId cluster) = 0;
};
/**
* Define a standard implementation of the AccessRestrictionExceptionChecker interface
* which is the default implementation used by AccessRestrictionProvider.
*/
class StandardAccessRestrictionExceptionChecker : public AccessRestrictionExceptionChecker
{
public:
StandardAccessRestrictionExceptionChecker() = default;
~StandardAccessRestrictionExceptionChecker() = default;
bool AreRestrictionsAllowed(EndpointId endpoint, ClusterId cluster) override;
};
/**
* Used to notify of changes in the access restriction list and active reviews.
*/
class Listener
{
public:
virtual ~Listener() = default;
/**
* Notifies of a change in the commissioning access restriction list.
*/
virtual void MarkCommissioningRestrictionListChanged() = 0;
/**
* Notifies of a change in the access restriction list.
*
* @param [in] fabricIndex The index of the fabric in which the list has changed.
*/
virtual void MarkRestrictionListChanged(FabricIndex fabricIndex) = 0;
/**
* Notifies of an update to an active review with instructions and an optional redirect URL.
*
* @param [in] fabricIndex The index of the fabric in which the entry has changed.
* @param [in] token The token of the review being updated (obtained from ReviewFabricRestrictionsResponse)
* @param [in] instruction Optional instructions to be displayed to the user.
* @param [in] redirectUrl An optional URL to redirect the user to for more information.
*/
virtual void OnFabricRestrictionReviewUpdate(FabricIndex fabricIndex, uint64_t token, Optional<CharSpan> instruction,
Optional<CharSpan> redirectUrl) = 0;
private:
Listener * mNext = nullptr;
friend class AccessRestrictionProvider;
};
AccessRestrictionProvider() = default;
virtual ~AccessRestrictionProvider() = default;
AccessRestrictionProvider(const AccessRestrictionProvider &) = delete;
AccessRestrictionProvider & operator=(const AccessRestrictionProvider &) = delete;
/**
* Set the restriction entries that are to be used during commissioning when there is no accessing fabric.
*
* @param [in] entries The entries to set.
*/
CHIP_ERROR SetCommissioningEntries(const std::vector<Entry> & entries);
/**
* Set the restriction entries for a fabric.
*
* @param [in] fabricIndex The index of the fabric for which to create entries.
* @param [in] entries The entries to set for the fabric.
*/
CHIP_ERROR SetEntries(const FabricIndex fabricIndex, const std::vector<Entry> & entries);
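/**
* One possible usage sketch: installing a single restriction entry for a fabric. Here
* `provider` stands for a concrete (platform-provided) subclass of
* AccessRestrictionProvider; the endpoint (2) and fabric index (1) are hypothetical,
* and the cluster is one that the standard exception checker permits to be restricted.
*
* @code
* AccessRestrictionProvider::Restriction restriction;
* restriction.restrictionType = AccessRestrictionProvider::Type::kCommandForbidden; // no id set: wildcard
* AccessRestrictionProvider::Entry entry;
* entry.endpointNumber = 2;
* entry.clusterId      = app::Clusters::WiFiNetworkManagement::Id;
* entry.restrictions.push_back(restriction);
* std::vector<AccessRestrictionProvider::Entry> entries{ entry };
* ReturnErrorOnFailure(provider.SetEntries(1, entries));
* @endcode
*/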
/**
* Add a listener to be notified of changes in the access restriction list and active reviews.
*
* @param [in] listener The listener to add.
*/
void AddListener(Listener & listener);
/**
* Remove a listener from being notified of changes in the access restriction list and active reviews.
*
* @param [in] listener The listener to remove.
*/
void RemoveListener(Listener & listener);
/**
* Check whether access by a subject descriptor to a request path should be restricted (denied) for the given action
* during commissioning by using the CommissioningEntries.
*
* These restrictions are only a part of overall access evaluation.
*
* If access is not restricted, CHIP_NO_ERROR will be returned.
*
* @retval CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL if access is restricted (denied).
* @retval other errors should also be treated as restricted/denied.
* @retval CHIP_NO_ERROR if access is not restricted/denied.
*/
CHIP_ERROR CheckForCommissioning(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath);
/**
* Check whether access by a subject descriptor to a request path should be restricted (denied) for the given action.
* These restrictions are only a part of overall access evaluation.
*
* If access is not restricted, CHIP_NO_ERROR will be returned.
*
* @retval CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL if access is restricted (denied).
* @retval other errors should also be treated as restricted/denied.
* @retval CHIP_NO_ERROR if access is not restricted/denied.
*/
CHIP_ERROR Check(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath);
/**
* Request a review of the access restrictions for a fabric.
*
* @param [in] fabricIndex The index of the fabric requesting a review.
* @param [in] arl An optional list of access restriction entries to review. If empty, all entries will be reviewed.
* @param [out] token The unique token for the review, which can be matched to a review update event.
*/
CHIP_ERROR RequestFabricRestrictionReview(FabricIndex fabricIndex, const std::vector<Entry> & arl, uint64_t & token)
{
token = mNextToken++;
return DoRequestFabricRestrictionReview(fabricIndex, token, arl);
}
/**
* Get the commissioning restriction entries.
*
* @retval the commissioning restriction entries.
*/
const std::vector<Entry> & GetCommissioningEntries() const { return mCommissioningEntries; }
/**
* Get the restriction entries for a fabric.
*
* @param [in] fabricIndex the index of the fabric for which to get entries.
* @param [out] entries vector to hold the entries.
*/
CHIP_ERROR GetEntries(const FabricIndex fabricIndex, std::vector<Entry> & entries) const
{
auto it = mFabricEntries.find(fabricIndex);
if (it == mFabricEntries.end())
{
return CHIP_ERROR_NOT_FOUND;
}
entries = (it->second);
return CHIP_NO_ERROR;
}
protected:
/**
* Initiate a review of the access restrictions for a fabric. This method should be implemented by the platform and be
* non-blocking.
*
* @param [in] fabricIndex The index of the fabric requesting a review.
* @param [in] token The unique token for the review, which can be matched to a review update event.
* @param [in] arl An optional list of access restriction entries to review. If empty, all entries will be reviewed.
* @return CHIP_NO_ERROR if the review was successfully requested, or an error code if the request failed.
*/
virtual CHIP_ERROR DoRequestFabricRestrictionReview(const FabricIndex fabricIndex, uint64_t token,
const std::vector<Entry> & arl) = 0;
private:
/**
* Perform the access restriction check using the given entries.
*/
CHIP_ERROR DoCheck(const std::vector<Entry> & entries, const SubjectDescriptor & subjectDescriptor,
const RequestPath & requestPath);
uint64_t mNextToken = 1;
Listener * mListeners = nullptr;
StandardAccessRestrictionExceptionChecker mExceptionChecker;
std::vector<Entry> mCommissioningEntries;
std::map<FabricIndex, std::vector<Entry>> mFabricEntries;
};
} // namespace Access
} // namespace chip

View File

@@ -0,0 +1,37 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
namespace chip {
namespace Access {
// Using bitfield values so auth mode and privilege set can be stored together.
// Auth mode should have only one value expressed, which should not be None.
enum class AuthMode : uint8_t
{
kNone = 0,
kPase = 1 << 5,
kCase = 1 << 6,
kGroup = 1 << 7
};
} // namespace Access
} // namespace chip

View File

@@ -0,0 +1,39 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <cstdint>
namespace chip {
namespace Access {
// Using bitfield values so privilege set and auth mode can be stored together.
// Privilege set can have more than one value expressed (e.g. View,
// ProxyView, and Operate).
enum class Privilege : uint8_t
{
kView = 1 << 0,
kProxyView = 1 << 1,
kOperate = 1 << 2,
kManage = 1 << 3,
kAdminister = 1 << 4
};
} // namespace Access
} // namespace chip
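Because more than one privilege bit can be set at once, a sketch of building and querying a privilege set; the helper is hypothetical.
#include <access/Privilege.h>
#include <cstdint>
#include <initializer_list>
// Illustrative only: OR several Privilege bits into one set and test membership later
// with (set & static_cast<uint8_t>(p)) != 0.
inline uint8_t MakePrivilegeSet(std::initializer_list<chip::Access::Privilege> privileges)
{
    uint8_t set = 0;
    for (chip::Access::Privilege p : privileges)
    {
        set = static_cast<uint8_t>(set | static_cast<uint8_t>(p));
    }
    return set;
}
// Example: a set granting View, ProxyView and Operate.
// uint8_t set = MakePrivilegeSet({ chip::Access::Privilege::kView,
//                                  chip::Access::Privilege::kProxyView,
//                                  chip::Access::Privilege::kOperate });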

View File

@@ -0,0 +1,48 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <lib/core/DataModelTypes.h>
#include <optional>
namespace chip {
namespace Access {
enum class RequestType : uint8_t
{
kRequestTypeUnknown,
kAttributeReadRequest,
kAttributeWriteRequest,
kCommandInvokeRequest,
kEventReadRequest
};
struct RequestPath
{
// NOTE: eventually this will likely also contain node, for proxying
ClusterId cluster = 0;
EndpointId endpoint = 0;
RequestType requestType = RequestType::kRequestTypeUnknown;
// entityId represents an attribute, command, or event ID, which is determined by the requestType. Wildcard if omitted.
std::optional<uint32_t> entityId;
};
} // namespace Access
} // namespace chip
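A sketch of filling a RequestPath for a concrete attribute read; the cluster/endpoint/attribute IDs are placeholders and the include path is an assumption.
#include <access/RequestPath.h> // assumed include path for the struct above
// Illustrative only: describe a read of attribute 0x0000 in cluster 0x0006 on endpoint 1.
chip::Access::RequestPath MakeOnOffReadPath()
{
    chip::Access::RequestPath path;
    path.cluster     = 0x0006;
    path.endpoint    = 1;
    path.requestType = chip::Access::RequestType::kAttributeReadRequest;
    path.entityId    = 0x0000; // leave unset to express a wildcard entity, per the comment above
    return path;
}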

View File

@@ -0,0 +1,52 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "AuthMode.h"
#include <lib/core/CASEAuthTag.h>
#include <lib/core/DataModelTypes.h>
#include <lib/core/NodeId.h>
namespace chip {
namespace Access {
struct SubjectDescriptor
{
// Holds FabricIndex of fabric, 0 if no fabric.
FabricIndex fabricIndex = kUndefinedFabricIndex;
// Holds AuthMode of subject(s), kNone if no access.
AuthMode authMode = AuthMode::kNone;
// NOTE: due to packing there should be free bytes here
// Holds subject according to auth mode.
NodeId subject = kUndefinedNodeId;
// CASE Authenticated Tags (CATs) only valid if auth mode is CASE.
CATValues cats;
// Whether the subject is currently a pending commissionee. See `IsCommissioning`
// definition in Core Specification's ACL Architecture pseudocode.
bool isCommissioning = false;
};
} // namespace Access
} // namespace chip
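A sketch of a CASE-authenticated SubjectDescriptor; the node id and fabric index are placeholders and the include path is an assumption.
#include <access/SubjectDescriptor.h> // assumed include path for the struct above
// Illustrative only: a subject authenticated over CASE on fabric index 1.
chip::Access::SubjectDescriptor MakeCaseSubject()
{
    chip::Access::SubjectDescriptor descriptor;
    descriptor.fabricIndex = 1;                     // placeholder fabric index
    descriptor.authMode    = chip::Access::AuthMode::kCase;
    descriptor.subject     = 0x0000000000000001ULL; // placeholder operational node id
    // descriptor.cats keeps its default (no CASE Authenticated Tags);
    // descriptor.isCommissioning stays false for an already-commissioned subject.
    return descriptor;
}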

File diff suppressed because it is too large

View File

@@ -0,0 +1,38 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "access/AccessControl.h"
#include <lib/core/CHIPPersistentStorageDelegate.h>
namespace chip {
namespace Access {
namespace Examples {
/**
* @brief Get a global instance of the access control delegate implemented in this module.
*
* NOTE: This function should be followed by an ::Init() method call. This function does
* not manage lifecycle considerations.
*
* @return a pointer to the AccessControl::Delegate singleton.
*/
AccessControl::Delegate * GetAccessControlDelegate();
} // namespace Examples
} // namespace Access
} // namespace chip
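Per the note above about calling ::Init(), a minimal sketch of obtaining and initializing the delegate. It assumes the Delegate::Init() declared on AccessControl::Delegate is the intended call, and omits the header include since this header's path is not shown in the diff.
// Illustrative only: fetch the example delegate and initialize it before handing it to access control setup.
CHIP_ERROR InitExampleAccessControlDelegate(chip::Access::AccessControl::Delegate *& outDelegate)
{
    outDelegate = chip::Access::Examples::GetAccessControlDelegate();
    if (outDelegate == nullptr)
    {
        return CHIP_ERROR_INCORRECT_STATE;
    }
    return outDelegate->Init(); // per the note above, Init() must follow the getter
}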

View File

@@ -0,0 +1,84 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "PermissiveAccessControlDelegate.h"
namespace {
using namespace chip;
using namespace chip::Access;
using Entry = chip::Access::AccessControl::Entry;
using EntryIterator = chip::Access::AccessControl::EntryIterator;
using Target = Entry::Target;
class AccessControlDelegate : public AccessControl::Delegate
{
public:
CHIP_ERROR Init() override { return CHIP_NO_ERROR; }
void Finish() override {}
// Capabilities
CHIP_ERROR GetMaxEntryCount(size_t & value) const override
{
value = 0;
return CHIP_NO_ERROR;
}
// Actualities
CHIP_ERROR GetEntryCount(size_t & value) const override
{
value = 0;
return CHIP_NO_ERROR;
}
// Preparation
CHIP_ERROR PrepareEntry(Entry & entry) override { return CHIP_NO_ERROR; }
// CRUD
CHIP_ERROR CreateEntry(size_t * index, const Entry & entry, FabricIndex * fabricIndex) override { return CHIP_NO_ERROR; }
CHIP_ERROR ReadEntry(size_t index, Entry & entry, const FabricIndex * fabricIndex) const override { return CHIP_NO_ERROR; }
CHIP_ERROR UpdateEntry(size_t index, const Entry & entry, const FabricIndex * fabricIndex) override { return CHIP_NO_ERROR; }
CHIP_ERROR DeleteEntry(size_t index, const FabricIndex * fabricIndex) override { return CHIP_NO_ERROR; }
// Iteration
CHIP_ERROR Entries(EntryIterator & iterator, const FabricIndex * fabricIndex) const override { return CHIP_NO_ERROR; }
// Check
CHIP_ERROR Check(const SubjectDescriptor & subjectDescriptor, const RequestPath & requestPath,
Privilege requestPrivilege) override
{
return CHIP_NO_ERROR;
}
};
} // namespace
namespace chip {
namespace Access {
namespace Examples {
AccessControl::Delegate * GetPermissiveAccessControlDelegate()
{
static AccessControlDelegate accessControlDelegate;
return &accessControlDelegate;
}
} // namespace Examples
} // namespace Access
} // namespace chip

View File

@@ -0,0 +1,29 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "access/AccessControl.h"
namespace chip {
namespace Access {
namespace Examples {
AccessControl::Delegate * GetPermissiveAccessControlDelegate();
} // namespace Examples
} // namespace Access
} // namespace chip

View File

@@ -0,0 +1,22 @@
/*
*
* Copyright (c) 2020-2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#if CHIP_HAVE_CONFIG_H
#include <app/AppBuildConfig.h>
#endif

View File

@@ -0,0 +1,162 @@
/*
*
* Copyright (c) 2021-2022 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/AttributeReportBuilder.h>
#include <app/AttributeValueDecoder.h>
#include <app/AttributeValueEncoder.h>
#include <lib/core/CHIPError.h>
/**
* Callback class that clusters can implement in order to interpose custom
* attribute-handling logic. An AttributeAccessInterface instance is associated
* with some specific cluster. A single instance may be used for a specific
* endpoint or for all endpoints.
*
* Instances of AttributeAccessInterface that are registered via
* AttributeAccessInterfaceRegistry::Instance().Register will be consulted before taking the
* normal attribute access codepath and can use that codepath as a fallback if desired.
*/
namespace chip {
namespace app {
class AttributeAccessInterface
{
public:
/**
* aEndpointId can be Missing to indicate that this object is meant to be
* used with all endpoints.
*/
AttributeAccessInterface(Optional<EndpointId> aEndpointId, ClusterId aClusterId) :
mEndpointId(aEndpointId), mClusterId(aClusterId)
{}
virtual ~AttributeAccessInterface() {}
/**
* Callback for reading attributes.
*
* @param [in] aPath indicates which exact data is being read.
* @param [in] aEncoder the AttributeValueEncoder to use for encoding the
* data.
*
* The implementation can do one of three things:
*
* 1) Return a failure. This is treated as a failed read and the error is
* returned to the client, by converting it to a StatusIB.
* 2) Return success and attempt to encode data using aEncoder. The data is
* returned to the client.
* 3) Return success and not attempt to encode any data using aEncoder. In
* this case, Ember attribute access will happen for the read. This may
* involve reading from the attribute store or external attribute
* callbacks.
*/
virtual CHIP_ERROR Read(const ConcreteReadAttributePath & aPath, AttributeValueEncoder & aEncoder) = 0;
/**
* Callback for writing attributes.
*
* @param [in] aPath indicates which exact data is being written.
* @param [in] aDecoder the AttributeValueDecoder to use for decoding the
* data.
*
* The implementation can do one of three things:
*
* 1) Return a failure. This is treated as a failed write and the error is
* sent to the client, by converting it to a StatusIB.
* 2) Return success and attempt to decode from aDecoder. This is
* treated as a successful write.
* 3) Return success and not attempt to decode from aDecoder. In
* this case, Ember attribute access will happen for the write. This may
* involve writing to the attribute store or external attribute
* callbacks.
*/
virtual CHIP_ERROR Write(const ConcreteDataAttributePath & aPath, AttributeValueDecoder & aDecoder) { return CHIP_NO_ERROR; }
/**
* Indicates the start of a series of list operations. This function will be called before the first Write operation of a series
* of consecutive attribute data items for the same attribute.
*
* 1) This function will be called if the client tries to set a nullable list attribute to null.
* 2) This function will only be called once for a series of consecutive attribute data items (regardless of the kind of list
* operation) for the same attribute.
*
* @param [in] aPath indicates the path of the modified list.
*/
virtual void OnListWriteBegin(const ConcreteAttributePath & aPath) {}
/**
* Indicates the end of a series of list operations. This function will be called after the last Write operation of a series
* of consecutive attribute data items for the same attribute.
*
* 1) This function will be called if the client tries to set a nullable list attribute to null.
* 2) This function will only be called once for a series of consecutive attribute data items (regardless of the kind of list
* operation) for the same attribute.
* 3) When aWriteWasSuccessful is true, the data written must be consistent or the list is untouched.
*
* @param [in] aPath indicates the path of the modified list
* @param [in] aWriteWasSuccessful indicates whether the delivered list is complete.
*
*/
virtual void OnListWriteEnd(const ConcreteAttributePath & aPath, bool aWriteWasSuccessful) {}
/**
* Mechanism for keeping track of a chain of AttributeAccessInterfaces.
*/
void SetNext(AttributeAccessInterface * aNext) { mNext = aNext; }
AttributeAccessInterface * GetNext() const { return mNext; }
/**
* Check whether this AttributeAccessInterface is relevant for a
* particular endpoint+cluster. An AttributeAccessInterface will be used
* for a read from a particular cluster only when this function returns
* true.
*/
bool Matches(EndpointId aEndpointId, ClusterId aClusterId) const
{
return (!mEndpointId.HasValue() || mEndpointId.Value() == aEndpointId) && mClusterId == aClusterId;
}
/**
* Check whether an AttributeAccessInterface is relevant for a particular
* specific endpoint. This is used to clean up overrides registered for an
* endpoint that becomes disabled.
*/
bool MatchesEndpoint(EndpointId aEndpointId) const { return mEndpointId.HasValue() && mEndpointId.Value() == aEndpointId; }
/**
* Check whether another AttributeAccessInterface wants to handle the same set of
* attributes as we do.
*/
bool Matches(const AttributeAccessInterface & aOther) const
{
return mClusterId == aOther.mClusterId &&
(!mEndpointId.HasValue() || !aOther.mEndpointId.HasValue() || mEndpointId.Value() == aOther.mEndpointId.Value());
}
protected:
Optional<EndpointId> GetEndpointId() { return mEndpointId; }
private:
Optional<EndpointId> mEndpointId;
ClusterId mClusterId;
AttributeAccessInterface * mNext = nullptr;
};
} // namespace app
} // namespace chip
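To make the Read contract above concrete, a minimal sketch of an override for a hypothetical cluster; the cluster and attribute IDs are placeholders, and aEncoder.Encode() is assumed from AttributeValueEncoder.h, which the header above includes but this diff does not show.
#include <app/AttributeAccessInterface.h>
namespace example {
// Illustrative only: handles one attribute of a made-up cluster on all endpoints.
class MyClusterAccess : public chip::app::AttributeAccessInterface
{
public:
    MyClusterAccess() :
        chip::app::AttributeAccessInterface(chip::Optional<chip::EndpointId>::Missing(), /* hypothetical cluster */ 0xFFF1FC01)
    {}
    CHIP_ERROR Read(const chip::app::ConcreteReadAttributePath & aPath, chip::app::AttributeValueEncoder & aEncoder) override
    {
        if (aPath.mAttributeId == 0x0000) // placeholder attribute id
        {
            return aEncoder.Encode(static_cast<uint32_t>(42)); // case 2: return success and encode data
        }
        return CHIP_NO_ERROR; // case 3: fall through to the normal attribute access codepath
    }
};
} // namespace example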

View File

@@ -0,0 +1,145 @@
/*
*
* Copyright (c) 2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stddef.h>
#include <app/AttributeAccessInterface.h>
#include <lib/core/DataModelTypes.h>
namespace chip {
namespace app {
/**
* @brief Cache to make look-up of AttributeAccessInterface (AAI) instances faster.
*
* This cache makes use of the fact that looking-up AttributeAccessInterface
* instances is usually done in loops, during read/subscription wildcard
* expansion, and there is a significant amount of locality.
*
* This cache records both "used" (i.e. uses AAI) and the single last
* "unused" (i.e. does NOT use AAI) entries. Combining positive/negative
* lookup led to a factor of ~10 reduction in total AAI lookups for wildcard
* reads on chip-all-clusters-app, with a cache size of 1. Increasing the size did not
* significantly improve the performance.
*/
class AttributeAccessInterfaceCache
{
public:
enum class CacheResult
{
kCacheMiss,
kDefinitelyUnused,
kDefinitelyUsed
};
AttributeAccessInterfaceCache() { Invalidate(); }
/**
* @brief Invalidate the whole cache. Must be called every time list of AAI registrations changes.
*/
void Invalidate()
{
for (auto & entry : mCacheSlots)
{
entry.Invalidate();
}
mLastUnusedEntry.Invalidate();
}
/**
* @brief Mark that we know a given <`endpointId`, `clusterId`> uses AAI, with instance `attrInterface`
*/
void MarkUsed(EndpointId endpointId, ClusterId clusterId, AttributeAccessInterface * attrInterface)
{
GetCacheSlot(endpointId, clusterId)->Set(endpointId, clusterId, attrInterface);
}
/**
* @brief Mark that we know a given <`endpointId`, `clusterId`> does NOT use AAI.
*/
void MarkUnused(EndpointId endpointId, ClusterId clusterId) { mLastUnusedEntry.Set(endpointId, clusterId, nullptr); }
/**
* @brief Get the AttributeAccessInterface instance for a given <`endpointId`, `clusterId`>, if present in cache.
*
* @param endpointId - Endpoint ID to look-up.
* @param clusterId - Cluster ID to look-up.
* @param outAttributeAccess - If not null, and Get returns `kDefinitelyUsed`, then this is set to the instance pointer.
* @return a CacheResult indicating whether the entry is known to be used, known to be unused, or a cache miss.
*/
CacheResult Get(EndpointId endpointId, ClusterId clusterId, AttributeAccessInterface ** outAttributeAccess)
{
if (mLastUnusedEntry.Matches(endpointId, clusterId))
{
return CacheResult::kDefinitelyUnused;
}
AttributeAccessCacheEntry * cacheSlot = GetCacheSlot(endpointId, clusterId);
if (cacheSlot->Matches(endpointId, clusterId) && (cacheSlot->accessor != nullptr))
{
if (outAttributeAccess != nullptr)
{
*outAttributeAccess = cacheSlot->accessor;
}
return CacheResult::kDefinitelyUsed;
}
return CacheResult::kCacheMiss;
}
private:
struct AttributeAccessCacheEntry
{
EndpointId endpointId = kInvalidEndpointId;
ClusterId clusterId = kInvalidClusterId;
AttributeAccessInterface * accessor = nullptr;
void Invalidate()
{
endpointId = kInvalidEndpointId;
clusterId = kInvalidClusterId;
accessor = nullptr;
}
void Set(EndpointId theEndpointId, ClusterId theClusterId, AttributeAccessInterface * theAccessor)
{
endpointId = theEndpointId;
clusterId = theClusterId;
accessor = theAccessor;
}
bool Matches(EndpointId theEndpointId, ClusterId theClusterId) const
{
return (endpointId == theEndpointId) && (clusterId == theClusterId);
}
};
AttributeAccessCacheEntry * GetCacheSlot(EndpointId endpointId, ClusterId clusterId)
{
(void) endpointId;
(void) clusterId;
return &mCacheSlots[0];
}
AttributeAccessCacheEntry mCacheSlots[1];
AttributeAccessCacheEntry mLastUnusedEntry;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,132 @@
/**
* Copyright (c) 2024 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "app/AttributeAccessInterface.h"
#include <app/AttributeAccessInterfaceRegistry.h>
#include <app/AttributeAccessInterfaceCache.h>
namespace {
using chip::app::AttributeAccessInterface;
// shouldUnregister returns true if the given AttributeAccessInterface should be
// unregistered.
template <typename F>
void UnregisterMatchingAttributeAccessInterfaces(F shouldUnregister, AttributeAccessInterface *& list_head)
{
AttributeAccessInterface * prev = nullptr;
AttributeAccessInterface * cur = list_head;
while (cur)
{
AttributeAccessInterface * next = cur->GetNext();
if (shouldUnregister(cur))
{
// Remove it from the list
if (prev)
{
prev->SetNext(next);
}
else
{
list_head = next;
}
cur->SetNext(nullptr);
// Do not change prev in this case.
}
else
{
prev = cur;
}
cur = next;
}
}
} // namespace
namespace chip {
namespace app {
AttributeAccessInterfaceRegistry & AttributeAccessInterfaceRegistry::Instance()
{
static AttributeAccessInterfaceRegistry instance;
return instance;
}
void AttributeAccessInterfaceRegistry::Unregister(AttributeAccessInterface * attrOverride)
{
mAttributeAccessInterfaceCache.Invalidate();
UnregisterMatchingAttributeAccessInterfaces([attrOverride](AttributeAccessInterface * entry) { return entry == attrOverride; },
mAttributeAccessOverrides);
}
void AttributeAccessInterfaceRegistry::UnregisterAllForEndpoint(EndpointId endpointId)
{
mAttributeAccessInterfaceCache.Invalidate();
UnregisterMatchingAttributeAccessInterfaces(
[endpointId](AttributeAccessInterface * entry) { return entry->MatchesEndpoint(endpointId); }, mAttributeAccessOverrides);
}
bool AttributeAccessInterfaceRegistry::Register(AttributeAccessInterface * attrOverride)
{
mAttributeAccessInterfaceCache.Invalidate();
for (auto * cur = mAttributeAccessOverrides; cur; cur = cur->GetNext())
{
if (cur->Matches(*attrOverride))
{
ChipLogError(InteractionModel, "Duplicate attribute override registration failed");
return false;
}
}
attrOverride->SetNext(mAttributeAccessOverrides);
mAttributeAccessOverrides = attrOverride;
return true;
}
AttributeAccessInterface * AttributeAccessInterfaceRegistry::Get(EndpointId endpointId, ClusterId clusterId)
{
using CacheResult = AttributeAccessInterfaceCache::CacheResult;
AttributeAccessInterface * cached = nullptr;
CacheResult result = mAttributeAccessInterfaceCache.Get(endpointId, clusterId, &cached);
switch (result)
{
case CacheResult::kDefinitelyUnused:
return nullptr;
case CacheResult::kDefinitelyUsed:
return cached;
case CacheResult::kCacheMiss:
default:
// Did not cache yet, search set of AAI registered, and cache if found.
for (app::AttributeAccessInterface * cur = mAttributeAccessOverrides; cur; cur = cur->GetNext())
{
if (cur->Matches(endpointId, clusterId))
{
mAttributeAccessInterfaceCache.MarkUsed(endpointId, clusterId, cur);
return cur;
}
}
// Did not find AAI registered: mark as definitely not using.
mAttributeAccessInterfaceCache.MarkUnused(endpointId, clusterId);
}
return nullptr;
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,64 @@
/**
* Copyright (c) 2024 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/AttributeAccessInterface.h>
#include <app/AttributeAccessInterfaceCache.h>
namespace chip {
namespace app {
class AttributeAccessInterfaceRegistry
{
public:
/**
* Register an attribute access override. It will remain registered until the
* endpoint it's registered for is disabled (or until shutdown if it's
* registered for all endpoints) or until it is explicitly unregistered.
* Registration will fail if there is an already-registered override for the
* same set of attributes.
*
* @return false if there is an existing override that the new one would
* conflict with. In this case the override is not registered.
* @return true if registration was successful.
*/
bool Register(AttributeAccessInterface * attrOverride);
/**
* Unregister an attribute access override (for example if the object
* implementing AttributeAccessInterface is being destroyed).
*/
void Unregister(AttributeAccessInterface * attrOverride);
/**
* Unregister all attribute access interfaces that match this given endpoint.
*/
void UnregisterAllForEndpoint(EndpointId endpointId);
/**
* Get the registered attribute access override, or nullptr when no matching override is found.
*/
AttributeAccessInterface * Get(EndpointId aEndpointId, ClusterId aClusterId);
static AttributeAccessInterfaceRegistry & Instance();
private:
AttributeAccessInterface * mAttributeAccessOverrides = nullptr;
AttributeAccessInterfaceCache mAttributeAccessInterfaceCache;
};
} // namespace app
} // namespace chip
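A sketch of wiring an override into the registry during setup and removing it on teardown; the free functions are hypothetical wrappers, and MyClusterAccess refers to the subclass sketched after AttributeAccessInterface above.
#include <app/AttributeAccessInterfaceRegistry.h>
// Illustrative only: register an override once at startup, unregister before its storage goes away.
bool RegisterOverride(chip::app::AttributeAccessInterface & accessInterface)
{
    // Returns false if another override already claims the same endpoint/cluster set.
    return chip::app::AttributeAccessInterfaceRegistry::Instance().Register(&accessInterface);
}
void UnregisterOverride(chip::app::AttributeAccessInterface & accessInterface)
{
    chip::app::AttributeAccessInterfaceRegistry::Instance().Unregister(&accessInterface);
}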

View File

@@ -0,0 +1,45 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <access/Privilege.h>
#include <app/ConcreteAttributePath.h>
namespace chip {
namespace app {
/**
* AttributeAccessToken records the privilege granted for accessing the specified attribute. This struct is used in chunked write
* to avoid losing privilege when updating ACL items.
*/
struct AttributeAccessToken
{
ConcreteAttributePath mPath;
Access::Privilege mPrivilege;
bool operator==(const AttributeAccessToken & other) const { return mPath == other.mPath && mPrivilege == other.mPrivilege; }
bool Matches(const ConcreteAttributePath & aPath, const Access::Privilege & aPrivilege) const
{
return mPath == aPath && mPrivilege == aPrivilege;
}
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,103 @@
/*
* Copyright (c) 2021-2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <lib/core/DataModelTypes.h>
namespace chip {
namespace app {
/// Maintains the internal state of list encoding
///
/// List encoding is generally assumed incremental and chunkable (i.e.
/// partial encoding is ok.). For this purpose the class maintains two
/// pieces of data:
/// - AllowPartialData tracks if partial encoding is acceptable in the
/// current encoding state (to be used for atomic/non-atomic list item writes)
/// - CurrentEncodingListIndex representing the list index that is next
/// to be encoded in the output. kInvalidListIndex means that a new list
/// encoding has been started.
class AttributeEncodeState
{
public:
AttributeEncodeState() = default;
/// Allows the encode state to be initialized from an OPTIONAL
/// other encoding state
///
/// if other is nullptr, this is the same as the default initializer.
AttributeEncodeState(const AttributeEncodeState * other)
{
if (other != nullptr)
{
*this = *other;
}
else
{
mCurrentEncodingListIndex = kInvalidListIndex;
mAllowPartialData = false;
}
}
bool AllowPartialData() const { return mAllowPartialData; }
ListIndex CurrentEncodingListIndex() const { return mCurrentEncodingListIndex; }
AttributeEncodeState & SetAllowPartialData(bool allow)
{
mAllowPartialData = allow;
return *this;
}
AttributeEncodeState & SetCurrentEncodingListIndex(ListIndex idx)
{
mCurrentEncodingListIndex = idx;
return *this;
}
void Reset()
{
mCurrentEncodingListIndex = kInvalidListIndex;
mAllowPartialData = false;
}
private:
/**
* If set to kInvalidListIndex, indicates that we have not encoded any data for the list yet and
* need to start by encoding an empty list before we start encoding any list items.
*
* When set to a valid ListIndex value, indicates the index of the next list item that needs to be
* encoded (i.e. the count of items encoded so far).
*/
ListIndex mCurrentEncodingListIndex = kInvalidListIndex;
/**
* When an attempt to encode an attribute returns an error, the buffer may contain trailing dirty data
* (since the put was aborted). The report engine normally rolls back the buffer to right before encoding
* of the attribute started on errors.
*
* When chunking a list, EncodeListItem will atomically encode list items, ensuring that the
* state of the buffer is valid to send (i.e. contains no trailing garbage), and return an error
* if the list doesn't entirely fit. In this situation, mAllowPartialData is set to communicate to the
* report engine that it should not roll back the list items.
*
* TODO: There might be a better name for this variable.
*/
bool mAllowPartialData = false;
};
} // namespace app
} // namespace chip
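A sketch of how a chunked list encoder might drive this state; the surrounding encode logic is omitted, the flow is simplified to the state transitions only, and chip::ListIndex / chip::kInvalidListIndex are assumed from the DataModelTypes.h include above.
// Illustrative only: resume list encoding from a previously saved AttributeEncodeState.
void AdvanceListEncoding(chip::app::AttributeEncodeState & state)
{
    if (state.CurrentEncodingListIndex() == chip::kInvalidListIndex)
    {
        // New list: the (omitted) encoder would first emit an empty list, then start at index 0.
        state.SetCurrentEncodingListIndex(0);
    }
    // List items are encoded atomically; anything already in the buffer is safe to keep on error.
    state.SetAllowPartialData(true);
    // ... encode the item at state.CurrentEncodingListIndex() here, then advance:
    state.SetCurrentEncodingListIndex(static_cast<chip::ListIndex>(state.CurrentEncodingListIndex() + 1));
}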

View File

@@ -0,0 +1,261 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/AttributePathExpandIterator-Ember.h>
#include <app/AttributePathParams.h>
#include <app/ConcreteAttributePath.h>
#include <app/EventManagement.h>
#include <app/GlobalAttributes.h>
#include <app/util/att-storage.h>
#include <app/util/endpoint-config-api.h>
#include <lib/core/CHIPCore.h>
#include <lib/core/TLVDebug.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/DLLUtil.h>
#include <lib/support/logging/CHIPLogging.h>
using namespace chip;
// TODO: Need to make it so that declarations of things that don't depend on generated files are not intermixed in af.h with
// dependencies on generated files, so we don't have to re-declare things here.
// Note: Some of the generated files that af.h depends on are gen_config.h and gen_tokens.h
typedef uint8_t EmberAfClusterMask;
extern uint16_t emberAfEndpointCount();
extern uint16_t emberAfIndexFromEndpoint(EndpointId endpoint);
extern uint8_t emberAfClusterCount(EndpointId endpoint, bool server);
extern uint16_t emberAfGetServerAttributeCount(chip::EndpointId endpoint, chip::ClusterId cluster);
extern uint16_t emberAfGetServerAttributeIndexByAttributeId(chip::EndpointId endpoint, chip::ClusterId cluster,
chip::AttributeId attributeId);
extern chip::EndpointId emberAfEndpointFromIndex(uint16_t index);
extern Optional<ClusterId> emberAfGetNthClusterId(chip::EndpointId endpoint, uint8_t n, bool server);
extern Optional<AttributeId> emberAfGetServerAttributeIdByIndex(chip::EndpointId endpoint, chip::ClusterId cluster,
uint16_t attributeIndex);
extern uint8_t emberAfClusterIndex(EndpointId endpoint, ClusterId clusterId, EmberAfClusterMask mask);
extern bool emberAfEndpointIndexIsEnabled(uint16_t index);
namespace chip {
namespace app {
AttributePathExpandIteratorEmber::AttributePathExpandIteratorEmber(DataModel::Provider *,
SingleLinkedListNode<AttributePathParams> * aAttributePath) :
mpAttributePath(aAttributePath)
{
// Reset iterator state
mEndpointIndex = UINT16_MAX;
mClusterIndex = UINT8_MAX;
mAttributeIndex = UINT16_MAX;
static_assert(std::numeric_limits<decltype(mGlobalAttributeIndex)>::max() >= ArraySize(GlobalAttributesNotInMetadata),
"Our index won't be able to hold the value we need to hold.");
static_assert(std::is_same<decltype(mGlobalAttributeIndex), uint8_t>::value,
"If this changes audit all uses where we set to UINT8_MAX");
mGlobalAttributeIndex = UINT8_MAX;
// Make the iterator ready to emit the first valid path in the list.
Next();
}
void AttributePathExpandIteratorEmber::PrepareEndpointIndexRange(const AttributePathParams & aAttributePath)
{
if (aAttributePath.HasWildcardEndpointId())
{
mEndpointIndex = 0;
mEndEndpointIndex = emberAfEndpointCount();
}
else
{
mEndpointIndex = emberAfIndexFromEndpoint(aAttributePath.mEndpointId);
// If the given endpoint id does not exist on this device, emberAfIndexFromEndpoint returns uint16(0xFFFF); adding 1 then
// wraps mEndEndpointIndex to 0, which means we iterate over an empty endpoint set (i.e. we skip this path).
mEndEndpointIndex = static_cast<uint16_t>(mEndpointIndex + 1);
}
}
void AttributePathExpandIteratorEmber::PrepareClusterIndexRange(const AttributePathParams & aAttributePath, EndpointId aEndpointId)
{
if (aAttributePath.HasWildcardClusterId())
{
mClusterIndex = 0;
mEndClusterIndex = emberAfClusterCount(aEndpointId, true /* server */);
}
else
{
mClusterIndex = emberAfClusterIndex(aEndpointId, aAttributePath.mClusterId, CLUSTER_MASK_SERVER);
// If the given cluster id does not exist on the given endpoint, emberAfClusterIndex returns uint8(0xFF); adding 1 then
// wraps mEndClusterIndex to 0, which means we iterate over an empty cluster set (i.e. we skip this path).
mEndClusterIndex = static_cast<uint8_t>(mClusterIndex + 1);
}
}
void AttributePathExpandIteratorEmber::PrepareAttributeIndexRange(const AttributePathParams & aAttributePath,
EndpointId aEndpointId, ClusterId aClusterId)
{
if (aAttributePath.HasWildcardAttributeId())
{
mAttributeIndex = 0;
mEndAttributeIndex = emberAfGetServerAttributeCount(aEndpointId, aClusterId);
mGlobalAttributeIndex = 0;
mGlobalAttributeEndIndex = ArraySize(GlobalAttributesNotInMetadata);
}
else
{
mAttributeIndex = emberAfGetServerAttributeIndexByAttributeId(aEndpointId, aClusterId, aAttributePath.mAttributeId);
// If the given attribute id does not exist in the given cluster, emberAfGetServerAttributeIndexByAttributeId returns
// uint16(0xFFFF); adding 1 then wraps mEndAttributeIndex to 0, which means we iterate over an empty regular attribute set.
mEndAttributeIndex = static_cast<uint16_t>(mAttributeIndex + 1);
if (mAttributeIndex == UINT16_MAX)
{
// Check whether this is a non-metadata global attribute.
//
// Default to the max value, which will correspond (after we add 1
// and overflow to 0 for the max index) to us not going through
// non-metadata global attributes for this attribute.
mGlobalAttributeIndex = UINT8_MAX;
static_assert(ArraySize(GlobalAttributesNotInMetadata) <= UINT8_MAX, "Iterating over at most 256 array entries");
const uint8_t arraySize = static_cast<uint8_t>(ArraySize(GlobalAttributesNotInMetadata));
for (uint8_t idx = 0; idx < arraySize; ++idx)
{
if (GlobalAttributesNotInMetadata[idx] == aAttributePath.mAttributeId)
{
mGlobalAttributeIndex = idx;
break;
}
}
mGlobalAttributeEndIndex = static_cast<uint8_t>(mGlobalAttributeIndex + 1);
}
else
{
mGlobalAttributeIndex = UINT8_MAX;
mGlobalAttributeEndIndex = 0;
}
}
}
void AttributePathExpandIteratorEmber::ResetCurrentCluster()
{
// If this is a null iterator, or the attribute id of current cluster info is not a wildcard attribute id, then this function
// will do nothing, since we won't be expanding the wildcard attribute ids under a cluster.
VerifyOrReturn(mpAttributePath != nullptr && mpAttributePath->mValue.HasWildcardAttributeId());
// Otherwise, we will reset the index for iterating the attributes, so we report the attributes for this cluster again. This
// will ensure that the client sees a coherent view of the cluster from the reports generated by a single (wildcard) attribute
// path in the request.
//
// Note that when Next() returns, we must be in one of the following states:
// - This is not a wildcard path
// - We just expanded some attribute id field
// - We have exhausted all paths
// Only the second case will happen here since the above check will fail for 1 and 3, so the following Next() call must result
// in a valid path, which is the first attribute id we will emit for the current cluster.
mAttributeIndex = UINT16_MAX;
mGlobalAttributeIndex = UINT8_MAX;
Next();
}
bool AttributePathExpandIteratorEmber::Next()
{
for (; mpAttributePath != nullptr; (mpAttributePath = mpAttributePath->mpNext, mEndpointIndex = UINT16_MAX))
{
mOutputPath.mExpanded = mpAttributePath->mValue.IsWildcardPath();
if (mEndpointIndex == UINT16_MAX)
{
// Special case: If this is a concrete path, we just return its value as-is.
if (!mpAttributePath->mValue.IsWildcardPath())
{
mOutputPath.mEndpointId = mpAttributePath->mValue.mEndpointId;
mOutputPath.mClusterId = mpAttributePath->mValue.mClusterId;
mOutputPath.mAttributeId = mpAttributePath->mValue.mAttributeId;
// Prepare for next iteration
mEndpointIndex = mEndEndpointIndex = 0;
return true;
}
PrepareEndpointIndexRange(mpAttributePath->mValue);
mClusterIndex = UINT8_MAX;
}
for (; mEndpointIndex < mEndEndpointIndex;
(mEndpointIndex++, mClusterIndex = UINT8_MAX, mAttributeIndex = UINT16_MAX, mGlobalAttributeIndex = UINT8_MAX))
{
if (!emberAfEndpointIndexIsEnabled(mEndpointIndex))
{
// Not an enabled endpoint; skip it.
continue;
}
EndpointId endpointId = emberAfEndpointFromIndex(mEndpointIndex);
if (mClusterIndex == UINT8_MAX)
{
PrepareClusterIndexRange(mpAttributePath->mValue, endpointId);
mAttributeIndex = UINT16_MAX;
mGlobalAttributeIndex = UINT8_MAX;
}
for (; mClusterIndex < mEndClusterIndex;
(mClusterIndex++, mAttributeIndex = UINT16_MAX, mGlobalAttributeIndex = UINT8_MAX))
{
// emberAfGetNthClusterId must return a valid cluster id here since we have verified the mClusterIndex does
// not exceed the mEndClusterIndex.
ClusterId clusterId = emberAfGetNthClusterId(endpointId, mClusterIndex, true /* server */).Value();
if (mAttributeIndex == UINT16_MAX && mGlobalAttributeIndex == UINT8_MAX)
{
PrepareAttributeIndexRange(mpAttributePath->mValue, endpointId, clusterId);
}
if (mAttributeIndex < mEndAttributeIndex)
{
// emberAfGetServerAttributeIdByIndex must return a valid attribute here since we have verified that mAttributeIndex does
// not exceed mEndAttributeIndex.
mOutputPath.mAttributeId = emberAfGetServerAttributeIdByIndex(endpointId, clusterId, mAttributeIndex).Value();
mOutputPath.mClusterId = clusterId;
mOutputPath.mEndpointId = endpointId;
mAttributeIndex++;
// We found a valid attribute path, now return and increase the attribute index for next iteration.
// Return true will skip the increment of mClusterIndex, mEndpointIndex and mpAttributePath.
return true;
}
if (mGlobalAttributeIndex < mGlobalAttributeEndIndex)
{
// Return a path pointing to the next global attribute.
mOutputPath.mAttributeId = GlobalAttributesNotInMetadata[mGlobalAttributeIndex];
mOutputPath.mClusterId = clusterId;
mOutputPath.mEndpointId = endpointId;
mGlobalAttributeIndex++;
return true;
}
// We have exhausted all attributes of this cluster, continue iterating over attributes of next cluster.
}
// We have exhausted all clusters of this endpoint, continue iterating over clusters of next endpoint.
}
// We have exhausted all endpoints in this cluster info, continue iterating over next cluster info item.
}
// Reset to default, invalid value.
mOutputPath = ConcreteReadAttributePath();
return false;
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,136 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* Defines an iterator for iterating all possible paths from a list of AttributePathParams-s according to spec section 8.9.2.2
* (Valid Attribute Paths)
*/
#pragma once
#include <app/AttributePathParams.h>
#include <app/ConcreteAttributePath.h>
#include <app/EventManagement.h>
#include <app/data-model-provider/Provider.h>
#include <lib/core/CHIPCore.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/DLLUtil.h>
#include <lib/support/LinkedList.h>
#include <lib/support/logging/CHIPLogging.h>
#include <messaging/ExchangeContext.h>
#include <messaging/ExchangeMgr.h>
#include <messaging/Flags.h>
#include <protocols/Protocols.h>
namespace chip {
namespace app {
/**
* AttributePathExpandIteratorEmber is used to iterate over a linked list of AttributePathParams-s.
* The AttributePathExpandIteratorEmber is copyable; however, the given cluster info must remain valid when calling Next().
*
* AttributePathExpandIteratorEmber will expand attribute paths with wildcards, and only emit existing paths for AttributePathParams
* with wildcards. For AttributePathParams with a concrete path (i.e. does not contain wildcards), AttributePathExpandIteratorEmber
* will emit them as-is.
*
* The typical use of AttributePathExpandIteratorEmber may look like:
* ConcreteAttributePath path;
* for (AttributePathExpandIteratorEmber iterator(nullptr /* data model unused */, pAttributePathList); iterator.Get(path);
*      iterator.Next()) {...}
* where pAttributePathList is the head of the SingleLinkedListNode<AttributePathParams> list to expand.
*
* The iterator does not copy the given AttributePathParams; the given AttributePathParams must remain valid while the iterator is in use.
* If the set of endpoints, clusters, or attributes that are supported changes, AttributePathExpandIteratorEmber must be
* reinitialized.
*
* An initialized iterator will return the first valid path; there is no need to call Next() before calling Get() for the first time.
*
* Note: The Next() and Get() are two separate operations by design since a possible call of this iterator might be:
* - Get()
* - Chunk full, return
* - In a new chunk, Get()
*
* TODO: The AttributePathParams may support a group id, the iterator should be able to call group data provider to expand the group
* id.
*/
class AttributePathExpandIteratorEmber
{
public:
AttributePathExpandIteratorEmber(DataModel::Provider *, // datamodel is NOT used by this class
SingleLinkedListNode<AttributePathParams> * aAttributePath);
/**
* Advance the iterator to the next attribute path in the given cluster info.
*
* Returns false if AttributePathExpandIteratorEmber has exhausted all paths in the given AttributePathParams list.
*/
bool Next();
/**
* Fills aPath with the path the iterator currently points to.
* Returns false if the iterator is not pointing to a valid path (i.e. it has exhausted the cluster info).
*/
bool Get(ConcreteAttributePath & aPath)
{
aPath = mOutputPath;
return (mpAttributePath != nullptr); // still handling some path
}
/**
* Reset the iterator to the beginning of current cluster if we are in the middle of expanding a wildcard attribute id for some
* cluster.
*
* When attributes are changed in the middle of expanding a wildcard attribute, we need to reset the iterator, to provide the
* client with a consistent state of the cluster.
*/
void ResetCurrentCluster();
/** Start iterating over the given `paths` */
inline void ResetTo(SingleLinkedListNode<AttributePathParams> * paths)
{
*this = AttributePathExpandIteratorEmber(nullptr /* data model is not used */, paths);
}
private:
SingleLinkedListNode<AttributePathParams> * mpAttributePath;
ConcreteAttributePath mOutputPath;
uint16_t mEndpointIndex, mEndEndpointIndex;
uint16_t mAttributeIndex, mEndAttributeIndex;
// Note: should use decltype(EmberAfEndpointType::clusterCount) here, but af-types is including app specific generated files.
uint8_t mClusterIndex, mEndClusterIndex;
// For dealing with global attributes that are not part of the attribute
// metadata.
uint8_t mGlobalAttributeIndex, mGlobalAttributeEndIndex;
/**
* Prepare*IndexRange will update mBegin*Index and mEnd*Index variables.
* If AttributePathParams contains a wildcard field, it will set mBegin*Index to 0 and mEnd*Index to count.
* Or it will set mBegin*Index to the index of the Endpoint/Cluster/Attribute, and mEnd*Index to mBegin*Index + 1.
*
* If the Endpoint/Cluster/Attribute does not exist, mBegin*Index will be UINT*_MAX, and mEnd*Index will be 0.
*
* The index can be used with emberAfEndpointFromIndex, emberAfGetNthClusterId and emberAfGetServerAttributeIdByIndex.
*/
void PrepareEndpointIndexRange(const AttributePathParams & aAttributePath);
void PrepareClusterIndexRange(const AttributePathParams & aAttributePath, EndpointId aEndpointId);
void PrepareAttributeIndexRange(const AttributePathParams & aAttributePath, EndpointId aEndpointId, ClusterId aClusterId);
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,46 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/AppConfig.h>
#if CHIP_CONFIG_USE_EMBER_DATA_MODEL && CHIP_CONFIG_USE_DATA_MODEL_INTERFACE
#include <app/AttributePathExpandIterator-Checked.h>
#else
#if CHIP_CONFIG_USE_DATA_MODEL_INTERFACE
#include <app/AttributePathExpandIterator-DataModel.h>
#else
#include <app/AttributePathExpandIterator-Ember.h>
#endif // CHIP_CONFIG_USE_DATA_MODEL_INTERFACE
#endif // CHIP_CONFIG_USE_EMBER_DATA_MODEL && CHIP_CONFIG_USE_DATA_MODEL_INTERFACE
namespace chip {
namespace app {
#if CHIP_CONFIG_USE_EMBER_DATA_MODEL && CHIP_CONFIG_USE_DATA_MODEL_INTERFACE
using AttributePathExpandIterator = ::chip::app::AttributePathExpandIteratorChecked;
#else
#if CHIP_CONFIG_USE_DATA_MODEL_INTERFACE
using AttributePathExpandIterator = ::chip::app::AttributePathExpandIteratorDataModel;
#else
using AttributePathExpandIterator = ::chip::app::AttributePathExpandIteratorEmber;
#endif // CHIP_CONFIG_USE_DATA_MODEL_INTERFACE
#endif // CHIP_CONFIG_USE_EMBER_DATA_MODEL && CHIP_CONFIG_USE_DATA_MODEL_INTERFACE
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,141 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/AppConfig.h>
#include <app/ConcreteAttributePath.h>
#include <app/DataVersionFilter.h>
#include <app/util/basic-types.h>
namespace chip {
namespace app {
#if CHIP_CONFIG_ENABLE_READ_CLIENT
class ReadClient;
#endif // CHIP_CONFIG_ENABLE_READ_CLIENT
struct AttributePathParams
{
AttributePathParams() = default;
explicit AttributePathParams(EndpointId aEndpointId) :
AttributePathParams(aEndpointId, kInvalidClusterId, kInvalidAttributeId, kInvalidListIndex)
{}
//
// TODO: (Issue #10596) Need to ensure that we do not encode the NodeId over the wire
// if it is either not 'set', or is set to a value that matches accessing fabric
// on which the interaction is undertaken.
AttributePathParams(EndpointId aEndpointId, ClusterId aClusterId) :
AttributePathParams(aEndpointId, aClusterId, kInvalidAttributeId, kInvalidListIndex)
{}
AttributePathParams(EndpointId aEndpointId, ClusterId aClusterId, AttributeId aAttributeId) :
AttributePathParams(aEndpointId, aClusterId, aAttributeId, kInvalidListIndex)
{}
AttributePathParams(ClusterId aClusterId, AttributeId aAttributeId) :
AttributePathParams(kInvalidEndpointId, aClusterId, aAttributeId, kInvalidListIndex)
{}
AttributePathParams(EndpointId aEndpointId, ClusterId aClusterId, AttributeId aAttributeId, ListIndex aListIndex) :
mClusterId(aClusterId), mAttributeId(aAttributeId), mEndpointId(aEndpointId), mListIndex(aListIndex)
{}
[[nodiscard]] bool IsWildcardPath() const
{
return HasWildcardEndpointId() || HasWildcardClusterId() || HasWildcardAttributeId();
}
bool operator==(const AttributePathParams & aOther) const
{
return mEndpointId == aOther.mEndpointId && mClusterId == aOther.mClusterId && mAttributeId == aOther.mAttributeId &&
mListIndex == aOther.mListIndex;
}
/**
* SPEC 8.9.2.2
* Check that the path meets some basic constraints of an attribute path: If list index is not wildcard, then field id must not
* be wildcard. This does not verify that the attribute being targeted is actually of list type when the list index is not
* wildcard.
*/
[[nodiscard]] bool IsValidAttributePath() const { return HasWildcardListIndex() || !HasWildcardAttributeId(); }
[[nodiscard]] inline bool HasWildcardEndpointId() const { return mEndpointId == kInvalidEndpointId; }
[[nodiscard]] inline bool HasWildcardClusterId() const { return mClusterId == kInvalidClusterId; }
[[nodiscard]] inline bool HasWildcardAttributeId() const { return mAttributeId == kInvalidAttributeId; }
[[nodiscard]] inline bool HasWildcardListIndex() const { return mListIndex == kInvalidListIndex; }
inline void SetWildcardEndpointId() { mEndpointId = kInvalidEndpointId; }
inline void SetWildcardClusterId() { mClusterId = kInvalidClusterId; }
inline void SetWildcardAttributeId()
{
mAttributeId = kInvalidAttributeId;
mListIndex = kInvalidListIndex;
}
[[nodiscard]] bool IsAttributePathSupersetOf(const AttributePathParams & other) const
{
VerifyOrReturnError(HasWildcardEndpointId() || mEndpointId == other.mEndpointId, false);
VerifyOrReturnError(HasWildcardClusterId() || mClusterId == other.mClusterId, false);
VerifyOrReturnError(HasWildcardAttributeId() || mAttributeId == other.mAttributeId, false);
VerifyOrReturnError(HasWildcardListIndex() || mListIndex == other.mListIndex, false);
return true;
}
[[nodiscard]] bool IsAttributePathSupersetOf(const ConcreteAttributePath & other) const
{
VerifyOrReturnError(HasWildcardEndpointId() || mEndpointId == other.mEndpointId, false);
VerifyOrReturnError(HasWildcardClusterId() || mClusterId == other.mClusterId, false);
VerifyOrReturnError(HasWildcardAttributeId() || mAttributeId == other.mAttributeId, false);
return true;
}
bool Intersects(const AttributePathParams & other) const
{
VerifyOrReturnError(HasWildcardEndpointId() || other.HasWildcardEndpointId() || mEndpointId == other.mEndpointId, false);
VerifyOrReturnError(HasWildcardClusterId() || other.HasWildcardClusterId() || mClusterId == other.mClusterId, false);
VerifyOrReturnError(HasWildcardAttributeId() || other.HasWildcardAttributeId() || mAttributeId == other.mAttributeId,
false);
return true;
}
bool IncludesAttributesInCluster(const DataVersionFilter & other) const
{
VerifyOrReturnError(HasWildcardEndpointId() || mEndpointId == other.mEndpointId, false);
VerifyOrReturnError(HasWildcardClusterId() || mClusterId == other.mClusterId, false);
return true;
}
// check if input concrete cluster path is subset of current wildcard attribute
bool IncludesAllAttributesInCluster(const ConcreteClusterPath & aOther) const
{
VerifyOrReturnError(HasWildcardEndpointId() || mEndpointId == aOther.mEndpointId, false);
VerifyOrReturnError(HasWildcardClusterId() || mClusterId == aOther.mClusterId, false);
return HasWildcardAttributeId();
}
ClusterId mClusterId = kInvalidClusterId; // uint32
AttributeId mAttributeId = kInvalidAttributeId; // uint32
EndpointId mEndpointId = kInvalidEndpointId; // uint16
ListIndex mListIndex = kInvalidListIndex; // uint16
};
} // namespace app
} // namespace chip
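A sketch of the wildcard conventions above, using placeholder IDs; the function name is illustrative only.
#include <app/AttributePathParams.h>
// Illustrative only: a concrete path versus a wildcard-attribute path on the same cluster.
inline bool WildcardCoversConcrete()
{
    chip::app::AttributePathParams concretePath(/* endpoint */ 1, /* cluster */ 0x0006, /* attribute */ 0x0000);
    chip::app::AttributePathParams wholeCluster(/* endpoint */ 1, /* cluster */ 0x0006); // attribute id left wildcard
    // wholeCluster.IsWildcardPath() is true; the superset check is also true because the
    // endpoint and cluster match and the attribute id is wildcarded.
    return wholeCluster.IsWildcardPath() && wholeCluster.IsAttributePathSupersetOf(concretePath);
}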

View File

@@ -0,0 +1,96 @@
/*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/ConcreteAttributePath.h>
#include <app/util/attribute-metadata.h>
#include <lib/support/Span.h>
namespace chip {
namespace app {
/**
* Interface for persisting attribute values. This will write attributes in storage with platform endianness for scalars
* and uses a different key space from SafeAttributePersistenceProvider.
* When storing cluster attributes that are managed via the AttributeAccessInterface, it is recommended to
* use SafeAttributePersistenceProvider.
*/
class AttributePersistenceProvider
{
public:
virtual ~AttributePersistenceProvider() = default;
AttributePersistenceProvider() = default;
/**
* Write an attribute value from the attribute store (i.e. not a struct or
* list) to non-volatile memory.
*
* @param [in] aPath the attribute path for the data being written.
* @param [in] aValue the data to write. Integers and floats are
* represented in native endianness. Strings are represented
* as Pascal-style strings, as in ZCL, with a length prefix
* whose size depends on the actual string type. The length is
* stored as little-endian.
*
* Integer and float values have a size that matches the `size`
* member of aMetadata.
*
* String values have a size that corresponds to the actual size
* of the data in the string (including the length prefix),
* which is no larger than the `size` member of aMetadata.
*/
virtual CHIP_ERROR WriteValue(const ConcreteAttributePath & aPath, const ByteSpan & aValue) = 0;
/**
* Read an attribute value from non-volatile memory.
*
* @param [in] aPath the attribute path for the data being persisted.
* @param [in] aMetadata the attribute metadata, as a convenience.
* @param [in,out] aValue where to place the data. The size of the buffer
* will be equal to `size` member of aMetadata.
*
* The data is expected to be in native endianness for
* integers and floats. For strings, see the string
* representation description in the WriteValue
* documentation.
*/
virtual CHIP_ERROR ReadValue(const ConcreteAttributePath & aPath, const EmberAfAttributeMetadata * aMetadata,
MutableByteSpan & aValue) = 0;
};
/**
* Instance getter for the global AttributePersistenceProvider.
*
* Callers have to externally synchronize usage of this function.
*
* @return The global AttributePersistenceProvider. This must never be null.
*/
AttributePersistenceProvider * GetAttributePersistenceProvider();
/**
* Instance setter for the global AttributePersistenceProvider.
*
* Callers have to externally synchronize usage of this function.
*
* If `aProvider` is nullptr, the value is not changed.
*
* @param[in] aProvider the AttributePersistenceProvider implementation to use.
*/
void SetAttributePersistenceProvider(AttributePersistenceProvider * aProvider);
} // namespace app
} // namespace chip
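A sketch of persisting a scalar through the global provider; the IDs and function name are placeholders, the include path is an assumption, and per the note above the value bytes are written in native endianness.
#include <app/AttributePersistenceProvider.h> // assumed include path for the interface above
#include <cstdint>
// Illustrative only: write the raw, native-endian bytes of a 2-byte attribute value.
CHIP_ERROR PersistPlaceholderValue()
{
    uint16_t value = 1234;
    chip::app::ConcreteAttributePath path(/* endpoint */ 1, /* cluster */ 0x0006, /* attribute */ 0x0000);
    chip::app::AttributePersistenceProvider * provider = chip::app::GetAttributePersistenceProvider();
    if (provider == nullptr)
    {
        return CHIP_ERROR_INCORRECT_STATE; // the getter is documented to never return null, but stay defensive in a sketch
    }
    return provider->WriteValue(path, chip::ByteSpan(reinterpret_cast<const uint8_t *>(&value), sizeof(value)));
}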

View File

@@ -0,0 +1,57 @@
/*
* Copyright (c) 2021-2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/AttributeReportBuilder.h>
namespace chip {
namespace app {
CHIP_ERROR AttributeReportBuilder::PrepareAttribute(AttributeReportIBs::Builder & aAttributeReportIBsBuilder,
const ConcreteDataAttributePath & aPath, DataVersion aDataVersion)
{
AttributeReportIB::Builder & attributeReportIBBuilder = aAttributeReportIBsBuilder.CreateAttributeReport();
ReturnErrorOnFailure(aAttributeReportIBsBuilder.GetError());
AttributeDataIB::Builder & attributeDataIBBuilder = attributeReportIBBuilder.CreateAttributeData();
ReturnErrorOnFailure(attributeReportIBBuilder.GetError());
attributeDataIBBuilder.DataVersion(aDataVersion);
AttributePathIB::Builder & attributePathIBBuilder = attributeDataIBBuilder.CreatePath();
ReturnErrorOnFailure(attributeDataIBBuilder.GetError());
attributePathIBBuilder.Endpoint(aPath.mEndpointId).Cluster(aPath.mClusterId).Attribute(aPath.mAttributeId);
if (aPath.mListOp == ConcreteDataAttributePath::ListOperation::AppendItem)
{
// An append to a list (or a data chunk consisting of just one list entry that's part of a bigger list) is represented by a
// null list index in the path.
attributePathIBBuilder.ListIndex(DataModel::Nullable<ListIndex>());
}
ReturnErrorOnFailure(attributePathIBBuilder.EndOfAttributePathIB());
return attributeDataIBBuilder.GetError();
}
CHIP_ERROR AttributeReportBuilder::FinishAttribute(AttributeReportIBs::Builder & aAttributeReportIBsBuilder)
{
ReturnErrorOnFailure(aAttributeReportIBsBuilder.GetAttributeReport().GetAttributeData().EndOfAttributeDataIB());
return aAttributeReportIBsBuilder.GetAttributeReport().EndOfAttributeReportIB();
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,78 @@
/*
* Copyright (c) 2021-2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/MessageDef/AttributeReportIBs.h>
#include <app/data-model/Encode.h>
#include <app/data-model/FabricScoped.h>
#include <app/data-model/List.h> // So we can encode lists
#include <lib/core/CHIPError.h>
#include <type_traits>
namespace chip {
namespace app {
/**
* The AttributeReportBuilder is a helper class for filling a single report in AttributeReportIBs.
*
* Possible usage of AttributeReportBuilder might be:
*
* AttributeReportBuilder builder;
* ReturnErrorOnFailure(builder.PrepareAttribute(...));
* ReturnErrorOnFailure(builder.Encode(...));
* ReturnErrorOnFailure(builder.FinishAttribute());
*/
class AttributeReportBuilder
{
public:
/**
* PrepareAttribute encodes the "header" part of an attribute report including the path and data version.
* Path will be encoded according to section 10.5.4.3.1 in the spec.
     * Note: Currently only append is supported (encoded as a null list index); other operations won't encode a list index in the
     * attribute path field.
* TODO: Add support for encoding a single element in the list (path with a valid list index).
*/
CHIP_ERROR PrepareAttribute(AttributeReportIBs::Builder & aAttributeReportIBs, const ConcreteDataAttributePath & aPath,
DataVersion aDataVersion);
/**
* FinishAttribute encodes the "footer" part of an attribute report (it closes the containers opened in PrepareAttribute)
*/
CHIP_ERROR FinishAttribute(AttributeReportIBs::Builder & aAttributeReportIBs);
/**
     * EncodeValue encodes the value field of the report; it should be called exactly once.
*/
template <typename T, std::enable_if_t<!DataModel::IsFabricScoped<T>::value, bool> = true, typename... Ts>
CHIP_ERROR EncodeValue(AttributeReportIBs::Builder & aAttributeReportIBs, TLV::Tag tag, T && item, Ts &&... aArgs)
{
return DataModel::Encode(*(aAttributeReportIBs.GetAttributeReport().GetAttributeData().GetWriter()), tag, item,
std::forward<Ts>(aArgs)...);
}
template <typename T, std::enable_if_t<DataModel::IsFabricScoped<T>::value, bool> = true, typename... Ts>
CHIP_ERROR EncodeValue(AttributeReportIBs::Builder & aAttributeReportIBs, TLV::Tag tag, FabricIndex accessingFabricIndex,
T && item, Ts &&... aArgs)
{
return DataModel::EncodeForRead(*(aAttributeReportIBs.GetAttributeReport().GetAttributeData().GetWriter()), tag,
accessingFabricIndex, item, std::forward<Ts>(aArgs)...);
}
};
} // namespace app
} // namespace chip
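// Illustrative sketch (not part of the imported file): filling a single attribute report for a
// hypothetical uint16 value using the three-step builder flow shown in the class comment. The
// builder, path and data version are assumed to be supplied by the reporting engine.
inline CHIP_ERROR EncodeUint16ReportExample(chip::app::AttributeReportIBs::Builder & aReportIBs,
                                            const chip::app::ConcreteDataAttributePath & aPath,
                                            chip::DataVersion aDataVersion, uint16_t aValue)
{
    chip::app::AttributeReportBuilder builder;
    ReturnErrorOnFailure(builder.PrepareAttribute(aReportIBs, aPath, aDataVersion));
    ReturnErrorOnFailure(builder.EncodeValue(aReportIBs, chip::TLV::ContextTag(chip::app::AttributeDataIB::Tag::kData), aValue));
    return builder.FinishAttribute(aReportIBs);
}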

View File

@@ -0,0 +1,76 @@
/*
* Copyright (c) 2021-2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <access/SubjectDescriptor.h>
#include <app/data-model/Decode.h>
#include <app/data-model/FabricScoped.h>
#include <lib/core/TLVReader.h>
#include <type_traits>
namespace chip {
namespace app {
class AttributeValueDecoder
{
public:
AttributeValueDecoder(TLV::TLVReader & aReader, const Access::SubjectDescriptor & aSubjectDescriptor) :
mReader(aReader), mSubjectDescriptor(aSubjectDescriptor)
{}
template <typename T, typename std::enable_if_t<!DataModel::IsFabricScoped<T>::value, bool> = true>
CHIP_ERROR Decode(T & aArg)
{
mTriedDecode = true;
return DataModel::Decode(mReader, aArg);
}
template <typename T, typename std::enable_if_t<DataModel::IsFabricScoped<T>::value, bool> = true>
CHIP_ERROR Decode(T & aArg)
{
mTriedDecode = true;
        // The WriteRequest comes with no fabric index; this will happen when receiving a write request on a PASE session before
        // AddNOC.
VerifyOrReturnError(AccessingFabricIndex() != kUndefinedFabricIndex, CHIP_IM_GLOBAL_STATUS(UnsupportedAccess));
ReturnErrorOnFailure(DataModel::Decode(mReader, aArg));
aArg.SetFabricIndex(AccessingFabricIndex());
return CHIP_NO_ERROR;
}
bool TriedDecode() const { return mTriedDecode; }
/**
* The accessing fabric index for this write interaction.
*/
FabricIndex AccessingFabricIndex() const { return mSubjectDescriptor.fabricIndex; }
/**
* The accessing subject descriptor for this write interaction.
*/
const Access::SubjectDescriptor & GetSubjectDescriptor() const { return mSubjectDescriptor; }
private:
friend class TestOnlyAttributeValueDecoderAccessor;
TLV::TLVReader & mReader;
bool mTriedDecode = false;
const Access::SubjectDescriptor mSubjectDescriptor;
};
} // namespace app
} // namespace chip
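// Illustrative sketch (not part of the imported file): a write handler decoding a uint8
// attribute with the decoder above and applying a range check before accepting the value.
// The limit of 100 is hypothetical.
inline CHIP_ERROR DecodePercentWriteExample(chip::app::AttributeValueDecoder & aDecoder, uint8_t & aOutValue)
{
    ReturnErrorOnFailure(aDecoder.Decode(aOutValue));
    return (aOutValue <= 100) ? CHIP_NO_ERROR : CHIP_ERROR_INVALID_ARGUMENT;
}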

View File

@@ -0,0 +1,113 @@
/*
* Copyright (c) 2021-2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/AttributeValueEncoder.h>
namespace chip {
namespace app {
namespace {
constexpr uint32_t kEndOfListByteCount = 1;
// 2 bytes: one to end the AttributeDataIB and one to end the AttributeReportIB.
constexpr uint32_t kEndOfAttributeReportIBByteCount = 2;
constexpr TLV::TLVType kAttributeDataIBType = TLV::kTLVType_Structure;
} // anonymous namespace
CHIP_ERROR AttributeValueEncoder::EnsureListStarted()
{
VerifyOrDie(mCurrentEncodingListIndex == kInvalidListIndex);
mEncodingInitialList = (mEncodeState.CurrentEncodingListIndex() == kInvalidListIndex);
if (mEncodingInitialList)
{
        // Clear the mAllowPartialData flag here since this encode procedure is not atomic.
        // The most common error in this function is CHIP_ERROR_NO_MEMORY / CHIP_ERROR_BUFFER_TOO_SMALL; it is fine to just
        // revert and try again next time.
mEncodeState.SetAllowPartialData(false);
AttributeReportBuilder builder;
mPath.mListOp = ConcreteDataAttributePath::ListOperation::ReplaceAll;
ReturnErrorOnFailure(builder.PrepareAttribute(mAttributeReportIBsBuilder, mPath, mDataVersion));
auto * attributeDataWriter = mAttributeReportIBsBuilder.GetAttributeReport().GetAttributeData().GetWriter();
TLV::TLVType outerType;
ReturnErrorOnFailure(
attributeDataWriter->StartContainer(TLV::ContextTag(AttributeDataIB::Tag::kData), TLV::kTLVType_Array, outerType));
VerifyOrDie(outerType == kAttributeDataIBType);
// Instead of reserving hardcoded amounts, we could checkpoint the
// writer, encode array end and FinishAttribute, check that this fits,
// measure how much the writer advanced, then restore the checkpoint,
// reserve the measured value, and save it. But that's probably more
// cycles than just reserving this known constant.
ReturnErrorOnFailure(
mAttributeReportIBsBuilder.GetWriter()->ReserveBuffer(kEndOfAttributeReportIBByteCount + kEndOfListByteCount));
mEncodeState.SetCurrentEncodingListIndex(0);
}
else
{
        // For all elements in the list, a report with an append operation will be generated. This will not change during the
        // encoding of each report since users cannot access mPath.
mPath.mListOp = ConcreteDataAttributePath::ListOperation::AppendItem;
}
mCurrentEncodingListIndex = 0;
    // After encoding the initial list start, the remaining items are atomically encoded into the buffer. Tell the report engine
    // not to revert partial data.
mEncodeState.SetAllowPartialData(true);
return CHIP_NO_ERROR;
}
void AttributeValueEncoder::EnsureListEnded()
{
if (!mEncodingInitialList)
{
// Nothing to do.
return;
}
// Unreserve the space we reserved just for this. Crash if anything here
// fails, because that would mean that we've corrupted our data, and since
// mEncodeState.mAllowPartialData is true nothing will clean up for us here.
auto * attributeDataWriter = mAttributeReportIBsBuilder.GetAttributeReport().GetAttributeData().GetWriter();
VerifyOrDie(attributeDataWriter->UnreserveBuffer(kEndOfListByteCount + kEndOfAttributeReportIBByteCount) == CHIP_NO_ERROR);
VerifyOrDie(attributeDataWriter->EndContainer(kAttributeDataIBType) == CHIP_NO_ERROR);
AttributeReportBuilder builder;
VerifyOrDie(builder.FinishAttribute(mAttributeReportIBsBuilder) == CHIP_NO_ERROR);
if (!mEncodedAtLeastOneListItem)
{
// If we have not managed to encode any list items, we don't actually
// want to output the single "empty list" IB that will then be followed
// by one-IB-per-item in the next packet. Just have the reporting
// engine roll back our entire attribute and put us in the next packet.
//
// If we succeeded at encoding the whole list (i.e. the list is in fact
// empty and we fit in the packet), mAllowPartialData will be ignored,
// so it's safe to set it to false even if encoding succeeded.
mEncodeState.SetAllowPartialData(false);
}
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,260 @@
/*
* Copyright (c) 2021-2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <access/SubjectDescriptor.h>
#include <app/AttributeEncodeState.h>
#include <app/AttributeReportBuilder.h>
#include <app/ConcreteAttributePath.h>
#include <app/MessageDef/AttributeReportIBs.h>
#include <app/data-model/FabricScoped.h>
#include <app/data-model/List.h>
#include <type_traits>
namespace chip {
namespace app {
/**
* The AttributeValueEncoder is a helper class for filling report payloads into AttributeReportIBs.
 * The attribute value encoder can be initialized with an AttributeEncodeState for saving and recovering its state between encode
 * sessions (chunking).
*
* When Encode returns recoverable errors (e.g. CHIP_ERROR_NO_MEMORY) the state can be used to initialize the AttributeValueEncoder
* for future use on the same attribute path.
*/
class AttributeValueEncoder
{
public:
class ListEncodeHelper
{
public:
ListEncodeHelper(AttributeValueEncoder & encoder) : mAttributeValueEncoder(encoder) {}
template <typename T, std::enable_if_t<DataModel::IsFabricScoped<T>::value, bool> = true>
CHIP_ERROR Encode(T && aArg) const
{
VerifyOrReturnError(aArg.GetFabricIndex() != kUndefinedFabricIndex, CHIP_ERROR_INVALID_FABRIC_INDEX);
// If we are encoding for a fabric filtered attribute read and the fabric index does not match that present in the
// request, skip encoding this list item.
VerifyOrReturnError(!mAttributeValueEncoder.mIsFabricFiltered ||
aArg.GetFabricIndex() == mAttributeValueEncoder.AccessingFabricIndex(),
CHIP_NO_ERROR);
return mAttributeValueEncoder.EncodeListItem(mAttributeValueEncoder.AccessingFabricIndex(), std::forward<T>(aArg));
}
template <typename T, std::enable_if_t<!DataModel::IsFabricScoped<T>::value, bool> = true>
CHIP_ERROR Encode(T && aArg) const
{
return mAttributeValueEncoder.EncodeListItem(std::forward<T>(aArg));
}
private:
AttributeValueEncoder & mAttributeValueEncoder;
};
AttributeValueEncoder(AttributeReportIBs::Builder & aAttributeReportIBsBuilder, Access::SubjectDescriptor subjectDescriptor,
const ConcreteAttributePath & aPath, DataVersion aDataVersion, bool aIsFabricFiltered = false,
const AttributeEncodeState & aState = AttributeEncodeState()) :
mAttributeReportIBsBuilder(aAttributeReportIBsBuilder),
mSubjectDescriptor(subjectDescriptor), mPath(aPath.mEndpointId, aPath.mClusterId, aPath.mAttributeId),
mDataVersion(aDataVersion), mIsFabricFiltered(aIsFabricFiltered), mEncodeState(aState)
{}
/**
* Encode a single value. This value will not be chunked; it will either be
* entirely encoded or fail to be encoded. Consumers are allowed to make
* either one call to Encode or one call to EncodeList to handle a read.
*/
template <typename... Ts>
CHIP_ERROR Encode(Ts &&... aArgs)
{
mTriedEncode = true;
return EncodeAttributeReportIB(std::forward<Ts>(aArgs)...);
}
/**
* Encode an explicit null value.
*/
CHIP_ERROR EncodeNull()
{
        // It doesn't matter which Nullable type we use here.
return Encode(DataModel::Nullable<uint8_t>());
}
/**
* Encode an explicit empty list.
*/
CHIP_ERROR EncodeEmptyList()
{
        // It doesn't matter which List type we use here.
return Encode(DataModel::List<uint8_t>());
}
/**
     * aCallback is expected to take a const auto & argument and call Encode() on it as many times as needed to encode all the
     * list elements one by one. If any of those Encode() calls returns failure, aCallback must stop encoding and return that
     * failure. When all items are encoded, aCallback is expected to return success.
*
* aCallback may not be called. Consumers must not assume it will be called.
*
* When EncodeList returns an error, the consumers must abort the encoding, and return the exact error to the caller.
*
     * TODO: Can we hold an error state in the AttributeValueEncoder itself so functions in ember-compatibility-functions don't
     * have to rely on the above assumption?
*
* Consumers are allowed to make either one call to EncodeList or one call to Encode to handle a read.
*
*/
template <typename ListGenerator>
CHIP_ERROR EncodeList(ListGenerator aCallback)
{
mTriedEncode = true;
// Spec 10.5.4.3.1, 10.5.4.6 (Replace a list w/ Multiple IBs)
// EmptyList acts as the beginning of the whole array type attribute report.
// An empty list is encoded iff both mCurrentEncodingListIndex and mEncodeState.mCurrentEncodingListIndex are invalid
// values. After encoding the empty list, mEncodeState.mCurrentEncodingListIndex and mCurrentEncodingListIndex are set to 0.
ReturnErrorOnFailure(EnsureListStarted());
CHIP_ERROR err = aCallback(ListEncodeHelper(*this));
// Even if encoding list items failed, make sure we EnsureListEnded().
// Since we encode list items atomically, in the case when we just
// didn't fit the next item we want to make sure our list is properly
// ended before the reporting engine starts chunking.
EnsureListEnded();
if (err == CHIP_NO_ERROR)
{
// The Encode procedure finished without any error, clear the state.
mEncodeState.Reset();
}
return err;
}
bool TriedEncode() const { return mTriedEncode; }
const Access::SubjectDescriptor & GetSubjectDescriptor() const { return mSubjectDescriptor; }
/**
* The accessing fabric index for this read or subscribe interaction.
*/
FabricIndex AccessingFabricIndex() const { return GetSubjectDescriptor().fabricIndex; }
/**
     * AttributeValueEncoder is a short-lived object; its state is persisted by mEncodeState and restored by the constructor.
*/
const AttributeEncodeState & GetState() const { return mEncodeState; }
private:
    // We made EncodeListItem() private; ListEncodeHelper exposes it via Encode().
friend class ListEncodeHelper;
friend class TestOnlyAttributeValueEncoderAccessor;
template <typename... Ts>
CHIP_ERROR EncodeListItem(Ts &&... aArgs)
{
// EncodeListItem must be called after EnsureListStarted(), thus mCurrentEncodingListIndex and
// mEncodeState.mCurrentEncodingListIndex are not invalid values.
if (mCurrentEncodingListIndex < mEncodeState.CurrentEncodingListIndex())
{
// We have encoded this element in previous chunks, skip it.
mCurrentEncodingListIndex++;
return CHIP_NO_ERROR;
}
TLV::TLVWriter backup;
mAttributeReportIBsBuilder.Checkpoint(backup);
CHIP_ERROR err;
if (mEncodingInitialList)
{
// Just encode a single item, with an anonymous tag.
AttributeReportBuilder builder;
err = builder.EncodeValue(mAttributeReportIBsBuilder, TLV::AnonymousTag(), std::forward<Ts>(aArgs)...);
}
else
{
err = EncodeAttributeReportIB(std::forward<Ts>(aArgs)...);
}
if (err != CHIP_NO_ERROR)
{
            // For list chunking, the ReportEngine should not roll back the buffer when CHIP_ERROR_NO_MEMORY or a similar error
            // occurs. However, the error might be raised in the middle of the encoding procedure, in which case the buffer may
            // contain partial data, unclosed containers etc. This rollback clears all such partial data and makes EncodeListItem
            // atomic.
mAttributeReportIBsBuilder.Rollback(backup);
return err;
}
mCurrentEncodingListIndex++;
mEncodeState.SetCurrentEncodingListIndex(mCurrentEncodingListIndex);
mEncodedAtLeastOneListItem = true;
return CHIP_NO_ERROR;
}
/**
* Builds a single AttributeReportIB in AttributeReportIBs. The caller is
* responsible for setting up mPath correctly.
*
* In particular, when we are encoding a single element in the list, mPath
     * must indicate a null list index to represent an "append" operation.
*/
template <typename... Ts>
CHIP_ERROR EncodeAttributeReportIB(Ts &&... aArgs)
{
AttributeReportBuilder builder;
ReturnErrorOnFailure(builder.PrepareAttribute(mAttributeReportIBsBuilder, mPath, mDataVersion));
ReturnErrorOnFailure(builder.EncodeValue(mAttributeReportIBsBuilder, TLV::ContextTag(AttributeDataIB::Tag::kData),
std::forward<Ts>(aArgs)...));
return builder.FinishAttribute(mAttributeReportIBsBuilder);
}
/**
* EnsureListStarted sets our mCurrentEncodingListIndex to 0, and:
*
* * If we are just starting the list, gets us ready to encode list items.
*
* * If we are continuing a chunked list, guarantees that mPath.mListOp is
* AppendItem after it returns.
*/
CHIP_ERROR EnsureListStarted();
/**
* EnsureListEnded writes out the end of the list and our attribute data IB,
* if we were encoding our initial list
*/
void EnsureListEnded();
AttributeReportIBs::Builder & mAttributeReportIBsBuilder;
const Access::SubjectDescriptor mSubjectDescriptor;
ConcreteDataAttributePath mPath;
DataVersion mDataVersion;
bool mTriedEncode = false;
bool mIsFabricFiltered = false;
// mEncodingInitialList is true if we're encoding a list and we have not
// started chunking it yet, so we're encoding a single attribute report IB
// for the whole list, not one per item.
bool mEncodingInitialList = false;
// mEncodedAtLeastOneListItem becomes true once we successfully encode a list item.
bool mEncodedAtLeastOneListItem = false;
ListIndex mCurrentEncodingListIndex = kInvalidListIndex;
AttributeEncodeState mEncodeState;
};
} // namespace app
} // namespace chip
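// Illustrative sketch (not part of the imported file): encoding a list attribute through
// EncodeList(). The encoder is assumed to have been constructed by the reporting engine for
// the attribute being read; the item values are arbitrary placeholders.
inline CHIP_ERROR EncodeListExample(chip::app::AttributeValueEncoder & aEncoder)
{
    return aEncoder.EncodeList([](const auto & listEncodeHelper) -> CHIP_ERROR {
        // Encode the items one by one; returning the first failure lets the encoder resume
        // from the same index in the next chunk.
        const uint8_t items[] = { 1, 2, 3 };
        for (uint8_t item : items)
        {
            ReturnErrorOnFailure(listEncodeHelper.Encode(item));
        }
        return CHIP_NO_ERROR;
    });
}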

View File

@@ -0,0 +1,267 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "lib/core/TLV.h"
#include "lib/core/TLVTags.h"
#include "lib/core/TLVTypes.h"
#include "protocols/interaction_model/Constants.h"
#include "system/SystemPacketBuffer.h"
#include "system/TLVPacketBufferBackingStore.h"
#include <app/BufferedReadCallback.h>
#include <app/InteractionModelEngine.h>
#include <lib/support/ScopedBuffer.h>
namespace chip {
namespace app {
void BufferedReadCallback::OnReportBegin()
{
mCallback.OnReportBegin();
}
void BufferedReadCallback::OnReportEnd()
{
CHIP_ERROR err = DispatchBufferedData(mBufferedPath, StatusIB(), true);
if (err != CHIP_NO_ERROR)
{
mCallback.OnError(err);
return;
}
mCallback.OnReportEnd();
}
CHIP_ERROR BufferedReadCallback::GenerateListTLV(TLV::ScopedBufferTLVReader & aReader)
{
TLV::TLVType outerType;
Platform::ScopedMemoryBuffer<uint8_t> backingBuffer;
//
// To generate the final reconstituted list, we need to allocate a contiguous
    // buffer that can hold the entirety of its contents. To do so, we need to figure out
// how big a buffer to allocate. This requires walking the buffered list items and computing their TLV sizes,
// summing them all up and adding a bit of slop to account for the TLV array the list elements will go into.
//
// The alternative was to use a PacketBufferTLVWriter backed by chained packet buffers to
// write out the list - this would have removed the need for this first pass. However,
// we cannot actually back a TLVReader with a chained buffer since that violates the ability
    // for us to create readers off of readers. Each reader would assume exclusive ownership of the chained
// buffer and mutate the state within TLVPacketBufferBackingStore, preventing shared use.
//
    // To avoid that, a single contiguous buffer is likely the best approach for now.
//
size_t totalBufSize = 0;
for (const auto & packetBuffer : mBufferedList)
{
totalBufSize += packetBuffer->TotalLength();
}
//
    // The start container and end container are just 1 byte each, but let's be safe.
//
totalBufSize += 4;
backingBuffer.Calloc(totalBufSize);
VerifyOrReturnError(backingBuffer.Get() != nullptr, CHIP_ERROR_NO_MEMORY);
TLV::ScopedBufferTLVWriter writer(std::move(backingBuffer), totalBufSize);
ReturnErrorOnFailure(writer.StartContainer(TLV::AnonymousTag(), TLV::kTLVType_Array, outerType));
for (auto & bufHandle : mBufferedList)
{
System::PacketBufferTLVReader reader;
reader.Init(std::move(bufHandle));
ReturnErrorOnFailure(reader.Next());
ReturnErrorOnFailure(writer.CopyElement(TLV::AnonymousTag(), reader));
}
ReturnErrorOnFailure(writer.EndContainer(outerType));
writer.Finalize(backingBuffer);
aReader.Init(std::move(backingBuffer), totalBufSize);
return CHIP_NO_ERROR;
}
CHIP_ERROR BufferedReadCallback::BufferListItem(TLV::TLVReader & reader)
{
System::PacketBufferTLVWriter writer;
System::PacketBufferHandle handle;
//
// We conservatively allocate a packet buffer as big as an IPv6 MTU (since we're buffering
// data received over the wire, which should always fit within that).
//
// We could have snapshotted the reader at its current position, advanced it past the current element
// and computed the delta in its read point to figure out the size of the element before allocating
// our target buffer. However, the reader's current position is already set past the control octet
// and the tag. Consequently, the computed size is always going to omit the sizes of these two parts of the
// TLV element. Since the tag can vary in size, for now, let's just do the safe thing. In the future, if this is a problem,
// we can improve this.
//
handle = System::PacketBufferHandle::New(chip::app::kMaxSecureSduLengthBytes);
VerifyOrReturnError(!handle.IsNull(), CHIP_ERROR_NO_MEMORY);
writer.Init(std::move(handle), false);
ReturnErrorOnFailure(writer.CopyElement(TLV::AnonymousTag(), reader));
ReturnErrorOnFailure(writer.Finalize(&handle));
// Compact the buffer down to a more reasonably sized packet buffer
// if we can.
//
handle.RightSize();
mBufferedList.push_back(std::move(handle));
return CHIP_NO_ERROR;
}
CHIP_ERROR BufferedReadCallback::BufferData(const ConcreteDataAttributePath & aPath, TLV::TLVReader * apData)
{
if (aPath.mListOp == ConcreteDataAttributePath::ListOperation::ReplaceAll)
{
TLV::TLVType outerContainer;
VerifyOrReturnError(apData->GetType() == TLV::kTLVType_Array, CHIP_ERROR_INVALID_TLV_ELEMENT);
mBufferedList.clear();
ReturnErrorOnFailure(apData->EnterContainer(outerContainer));
CHIP_ERROR err;
while ((err = apData->Next()) == CHIP_NO_ERROR)
{
ReturnErrorOnFailure(BufferListItem(*apData));
}
if (err == CHIP_END_OF_TLV)
{
err = CHIP_NO_ERROR;
}
ReturnErrorOnFailure(err);
ReturnErrorOnFailure(apData->ExitContainer(outerContainer));
}
else if (aPath.mListOp == ConcreteDataAttributePath::ListOperation::AppendItem)
{
ReturnErrorOnFailure(BufferListItem(*apData));
}
return CHIP_NO_ERROR;
}
CHIP_ERROR BufferedReadCallback::DispatchBufferedData(const ConcreteAttributePath & aPath, const StatusIB & aStatusIB,
bool aEndOfReport)
{
if (aPath == mBufferedPath)
{
//
// If we encountered the same list again and it's not the last DataIB, then
// we need to continue to buffer up this list's data, so return immediately without dispatching
// the existing buffered up contents.
//
if (!aEndOfReport)
{
return CHIP_NO_ERROR;
}
//
// If we had previously buffered up data for this list and now we have encountered
// an error for this list, that error takes precedence and the buffered data is now
// rendered invalid. Return immediately without dispatching the existing buffered up contents.
//
if (aStatusIB.mStatus != Protocols::InteractionModel::Status::Success)
{
return CHIP_NO_ERROR;
}
}
if (!mBufferedPath.IsListOperation())
{
return CHIP_NO_ERROR;
}
StatusIB statusIB;
TLV::ScopedBufferTLVReader reader;
ReturnErrorOnFailure(GenerateListTLV(reader));
//
    // Update the list operation to now reflect the delivery of the entire list,
    // i.e. a ReplaceAll operation.
//
mBufferedPath.mListOp = ConcreteDataAttributePath::ListOperation::ReplaceAll;
//
// Advance the reader forward to the list itself
//
ReturnErrorOnFailure(reader.Next());
mCallback.OnAttributeData(mBufferedPath, &reader, statusIB);
//
// Clear out our buffered contents to free up allocated buffers, and reset the buffered path.
//
mBufferedList.clear();
mBufferedPath = ConcreteDataAttributePath();
return CHIP_NO_ERROR;
}
void BufferedReadCallback::OnAttributeData(const ConcreteDataAttributePath & aPath, TLV::TLVReader * apData,
const StatusIB & aStatus)
{
CHIP_ERROR err;
//
// First, let's dispatch to our registered callback any buffered up list data from previous calls.
//
err = DispatchBufferedData(aPath, aStatus);
SuccessOrExit(err);
//
// We buffer up list data (only if the status was successful)
//
if (aPath.IsListOperation() && aStatus.mStatus == Protocols::InteractionModel::Status::Success)
{
err = BufferData(aPath, apData);
SuccessOrExit(err);
}
else
{
mCallback.OnAttributeData(aPath, apData, aStatus);
}
//
// Update our latched buffered path.
//
mBufferedPath = aPath;
exit:
if (err != CHIP_NO_ERROR)
{
mCallback.OnError(err);
}
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,138 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "lib/core/TLV.h"
#include "system/SystemPacketBuffer.h"
#include "system/TLVPacketBufferBackingStore.h"
#include <app/AppConfig.h>
#include <app/AttributePathParams.h>
#include <app/ReadClient.h>
#include <vector>
#if CHIP_CONFIG_ENABLE_READ_CLIENT
namespace chip {
namespace app {
/*
* This is an adapter that intercepts calls that deliver data from the ReadClient,
* selectively buffers up list chunks in TLV and reconstitutes them into a singular, contiguous TLV array
* upon completion of delivery of all chunks. This is then delivered to a compliant ReadClient::Callback
 * without any awareness on its part that chunking happened.
*
*/
class BufferedReadCallback : public ReadClient::Callback
{
public:
BufferedReadCallback(Callback & callback) : mCallback(callback) {}
private:
/*
     * Generates the reconstituted TLV array from the stored individual list elements.
*/
CHIP_ERROR GenerateListTLV(TLV::ScopedBufferTLVReader & reader);
/*
* Dispatch any buffered list data if we need to. Buffered data will only be dispatched if:
* 1. The path provided in aPath is different from the buffered path being tracked internally AND the type of data
* in the buffer is list data
*
* OR
*
* 2. The path provided in aPath is similar to what is buffered but we've hit the end of the report.
*
*/
CHIP_ERROR DispatchBufferedData(const ConcreteAttributePath & aPath, const StatusIB & aStatus, bool aEndOfReport = false);
/*
     * Buffer up list data as it arrives.
*/
CHIP_ERROR BufferData(const ConcreteDataAttributePath & aPath, TLV::TLVReader * apReader);
//
// ReadClient::Callback
//
void OnReportBegin() override;
void OnReportEnd() override;
void OnAttributeData(const ConcreteDataAttributePath & aPath, TLV::TLVReader * apData, const StatusIB & aStatus) override;
void OnError(CHIP_ERROR aError) override
{
mBufferedList.clear();
return mCallback.OnError(aError);
}
void OnEventData(const EventHeader & aEventHeader, TLV::TLVReader * apData, const StatusIB * apStatus) override
{
return mCallback.OnEventData(aEventHeader, apData, apStatus);
}
void OnDone(ReadClient * apReadClient) override { return mCallback.OnDone(apReadClient); }
void OnSubscriptionEstablished(SubscriptionId aSubscriptionId) override
{
mCallback.OnSubscriptionEstablished(aSubscriptionId);
}
CHIP_ERROR OnResubscriptionNeeded(ReadClient * apReadClient, CHIP_ERROR aTerminationCause) override
{
return mCallback.OnResubscriptionNeeded(apReadClient, aTerminationCause);
}
void OnDeallocatePaths(chip::app::ReadPrepareParams && aReadPrepareParams) override
{
return mCallback.OnDeallocatePaths(std::move(aReadPrepareParams));
}
virtual CHIP_ERROR OnUpdateDataVersionFilterList(DataVersionFilterIBs::Builder & aDataVersionFilterIBsBuilder,
const Span<AttributePathParams> & aAttributePaths,
bool & aEncodedDataVersionList) override
{
return mCallback.OnUpdateDataVersionFilterList(aDataVersionFilterIBsBuilder, aAttributePaths, aEncodedDataVersionList);
}
virtual CHIP_ERROR GetHighestReceivedEventNumber(Optional<EventNumber> & aEventNumber) override
{
return mCallback.GetHighestReceivedEventNumber(aEventNumber);
}
void OnUnsolicitedMessageFromPublisher(ReadClient * apReadClient) override
{
return mCallback.OnUnsolicitedMessageFromPublisher(apReadClient);
}
void OnCASESessionEstablished(const SessionHandle & aSession, ReadPrepareParams & aSubscriptionParams) override
{
return mCallback.OnCASESessionEstablished(aSession, aSubscriptionParams);
}
/*
* Given a reader positioned at a list element, allocate a packet buffer, copy the list item where
* the reader is positioned into that buffer and add it to our buffered list for tracking.
*
* This should be called in list index order starting from the lowest index that needs to be buffered.
*
*/
CHIP_ERROR BufferListItem(TLV::TLVReader & reader);
ConcreteDataAttributePath mBufferedPath;
std::vector<System::PacketBufferHandle> mBufferedList;
Callback & mCallback;
};
} // namespace app
} // namespace chip
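// Illustrative sketch (not part of the imported file): wiring a BufferedReadCallback in front
// of an application ReadClient::Callback so list chunks are reassembled before delivery. The
// ReadClient constructor arguments and SendRequest() call below are assumptions about the
// public ReadClient API; 'appCallback', 'exchangeMgr' and 'readParams' are hypothetical.
//
//   chip::app::BufferedReadCallback bufferedCallback(appCallback);
//   chip::app::ReadClient readClient(chip::app::InteractionModelEngine::GetInstance(), &exchangeMgr,
//                                    bufferedCallback, chip::app::ReadClient::InteractionType::Read);
//   ReturnErrorOnFailure(readClient.SendRequest(readParams));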
#endif // CHIP_CONFIG_ENABLE_READ_CLIENT

View File

@@ -0,0 +1,63 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/CASEClient.h>
#include <messaging/ReliableMessageProtocolConfig.h>
namespace chip {
void CASEClient::SetRemoteMRPIntervals(const ReliableMessageProtocolConfig & remoteMRPConfig)
{
mCASESession.SetRemoteMRPConfig(remoteMRPConfig);
}
const ReliableMessageProtocolConfig & CASEClient::GetRemoteMRPIntervals()
{
return mCASESession.GetRemoteMRPConfig();
}
CHIP_ERROR CASEClient::EstablishSession(const CASEClientInitParams & params, const ScopedNodeId & peer,
const Transport::PeerAddress & peerAddress,
const ReliableMessageProtocolConfig & remoteMRPConfig,
SessionEstablishmentDelegate * delegate)
{
VerifyOrReturnError(params.fabricTable != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
// Create a UnauthenticatedSession for CASE pairing.
Optional<SessionHandle> session = params.sessionManager->CreateUnauthenticatedSession(peerAddress, remoteMRPConfig);
VerifyOrReturnError(session.HasValue(), CHIP_ERROR_NO_MEMORY);
// Allocate the exchange immediately before calling CASESession::EstablishSession.
//
// CASESession::EstablishSession takes ownership of the exchange and will
// free it on error, but can only do this if it is actually called.
// Allocating the exchange context right before calling EstablishSession
// ensures that if allocation succeeds, CASESession has taken ownership.
Messaging::ExchangeContext * exchange = params.exchangeMgr->NewContext(session.Value(), &mCASESession);
VerifyOrReturnError(exchange != nullptr, CHIP_ERROR_INTERNAL);
const Optional<ReliableMessageProtocolConfig> & mrpLocalConfig =
params.mrpLocalConfig.HasValue() ? params.mrpLocalConfig : GetLocalMRPConfig();
mCASESession.SetGroupDataProvider(params.groupDataProvider);
ReturnErrorOnFailure(mCASESession.EstablishSession(*params.sessionManager, params.fabricTable, peer, exchange,
params.sessionResumptionStorage, params.certificateValidityPolicy, delegate,
mrpLocalConfig));
return CHIP_NO_ERROR;
}
}; // namespace chip

View File

@@ -0,0 +1,71 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <credentials/GroupDataProvider.h>
#include <messaging/ExchangeMgr.h>
#include <messaging/ReliableMessageProtocolConfig.h>
#include <protocols/secure_channel/CASESession.h>
namespace chip {
class CASEClient;
struct CASEClientInitParams
{
SessionManager * sessionManager = nullptr;
SessionResumptionStorage * sessionResumptionStorage = nullptr;
Credentials::CertificateValidityPolicy * certificateValidityPolicy = nullptr;
Messaging::ExchangeManager * exchangeMgr = nullptr;
FabricTable * fabricTable = nullptr;
Credentials::GroupDataProvider * groupDataProvider = nullptr;
// mrpLocalConfig should not generally be set to anything other than
// NullOptional. Doing that can lead to different parts of the system
// claiming different MRP parameters for the same node.
Optional<ReliableMessageProtocolConfig> mrpLocalConfig = NullOptional;
CHIP_ERROR Validate() const
{
// sessionResumptionStorage can be nullptr when resumption is disabled.
// certificateValidityPolicy is optional, too.
ReturnErrorCodeIf(sessionManager == nullptr, CHIP_ERROR_INCORRECT_STATE);
ReturnErrorCodeIf(exchangeMgr == nullptr, CHIP_ERROR_INCORRECT_STATE);
ReturnErrorCodeIf(fabricTable == nullptr, CHIP_ERROR_INCORRECT_STATE);
ReturnErrorCodeIf(groupDataProvider == nullptr, CHIP_ERROR_INCORRECT_STATE);
return CHIP_NO_ERROR;
}
};
class DLL_EXPORT CASEClient
{
public:
void SetRemoteMRPIntervals(const ReliableMessageProtocolConfig & remoteMRPConfig);
const ReliableMessageProtocolConfig & GetRemoteMRPIntervals();
CHIP_ERROR EstablishSession(const CASEClientInitParams & params, const ScopedNodeId & peer,
const Transport::PeerAddress & peerAddress, const ReliableMessageProtocolConfig & remoteMRPConfig,
SessionEstablishmentDelegate * delegate);
private:
CASESession mCASESession;
};
} // namespace chip
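// Illustrative sketch (not part of the imported file): populating the mandatory
// CASEClientInitParams fields before use. sessionResumptionStorage and
// certificateValidityPolicy may legitimately stay nullptr, as noted in Validate().
inline CHIP_ERROR MakeCASEClientInitParamsExample(chip::SessionManager & sessionManager,
                                                  chip::Messaging::ExchangeManager & exchangeMgr,
                                                  chip::FabricTable & fabricTable,
                                                  chip::Credentials::GroupDataProvider & groupDataProvider,
                                                  chip::CASEClientInitParams & outParams)
{
    outParams.sessionManager    = &sessionManager;
    outParams.exchangeMgr       = &exchangeMgr;
    outParams.fabricTable       = &fabricTable;
    outParams.groupDataProvider = &groupDataProvider;
    return outParams.Validate();
}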

View File

@@ -0,0 +1,49 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/CASEClient.h>
#include <lib/support/Pool.h>
namespace chip {
class CASEClientPoolDelegate
{
public:
virtual CASEClient * Allocate() = 0;
virtual void Release(CASEClient * client) = 0;
virtual ~CASEClientPoolDelegate() {}
};
template <size_t N>
class CASEClientPool : public CASEClientPoolDelegate
{
public:
~CASEClientPool() override { mClientPool.ReleaseAll(); }
CASEClient * Allocate() override { return mClientPool.CreateObject(); }
void Release(CASEClient * client) override { mClientPool.ReleaseObject(client); }
private:
ObjectPool<CASEClient, N> mClientPool;
};
}; // namespace chip
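// Illustrative sketch (not part of the imported file): allocating and releasing a CASEClient
// through the delegate interface, backed by a fixed-size pool declared elsewhere, e.g.
// 'chip::CASEClientPool<4> gCASEClientPool;' (the pool size of 4 is arbitrary).
inline void CASEClientPoolUsageExample(chip::CASEClientPoolDelegate & pool)
{
    chip::CASEClient * client = pool.Allocate();
    if (client != nullptr)
    {
        // ... hand the client to a CASE session setup ...
        pool.Release(client);
    }
}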

View File

@@ -0,0 +1,199 @@
/*
*
* Copyright (c) 2020-2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/CASESessionManager.h>
#include <lib/address_resolve/AddressResolve.h>
namespace chip {
CHIP_ERROR CASESessionManager::Init(chip::System::Layer * systemLayer, const CASESessionManagerConfig & params)
{
ReturnErrorOnFailure(params.sessionInitParams.Validate());
mConfig = params;
params.sessionInitParams.exchangeMgr->GetReliableMessageMgr()->RegisterSessionUpdateDelegate(this);
return AddressResolve::Resolver::Instance().Init(systemLayer);
}
void CASESessionManager::Shutdown()
{
AddressResolve::Resolver::Instance().Shutdown();
}
void CASESessionManager::FindOrEstablishSession(const ScopedNodeId & peerId, Callback::Callback<OnDeviceConnected> * onConnection,
Callback::Callback<OnDeviceConnectionFailure> * onFailure,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
uint8_t attemptCount, Callback::Callback<OnDeviceConnectionRetry> * onRetry,
#endif // CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
TransportPayloadCapability transportPayloadCapability)
{
FindOrEstablishSessionHelper(peerId, onConnection, onFailure, nullptr,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
attemptCount, onRetry,
#endif
transportPayloadCapability);
}
void CASESessionManager::FindOrEstablishSession(const ScopedNodeId & peerId, Callback::Callback<OnDeviceConnected> * onConnection,
Callback::Callback<OperationalSessionSetup::OnSetupFailure> * onSetupFailure,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
uint8_t attemptCount, Callback::Callback<OnDeviceConnectionRetry> * onRetry,
#endif
TransportPayloadCapability transportPayloadCapability)
{
FindOrEstablishSessionHelper(peerId, onConnection, nullptr, onSetupFailure,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
attemptCount, onRetry,
#endif
transportPayloadCapability);
}
void CASESessionManager::FindOrEstablishSession(const ScopedNodeId & peerId, Callback::Callback<OnDeviceConnected> * onConnection,
std::nullptr_t,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
uint8_t attemptCount, Callback::Callback<OnDeviceConnectionRetry> * onRetry,
#endif
TransportPayloadCapability transportPayloadCapability)
{
FindOrEstablishSessionHelper(peerId, onConnection, nullptr, nullptr,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
attemptCount, onRetry,
#endif
transportPayloadCapability);
}
void CASESessionManager::FindOrEstablishSessionHelper(const ScopedNodeId & peerId,
Callback::Callback<OnDeviceConnected> * onConnection,
Callback::Callback<OnDeviceConnectionFailure> * onFailure,
Callback::Callback<OperationalSessionSetup::OnSetupFailure> * onSetupFailure,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
uint8_t attemptCount, Callback::Callback<OnDeviceConnectionRetry> * onRetry,
#endif
TransportPayloadCapability transportPayloadCapability)
{
ChipLogDetail(CASESessionManager, "FindOrEstablishSession: PeerId = [%d:" ChipLogFormatX64 "]", peerId.GetFabricIndex(),
ChipLogValueX64(peerId.GetNodeId()));
bool forAddressUpdate = false;
OperationalSessionSetup * session = FindExistingSessionSetup(peerId, forAddressUpdate);
if (session == nullptr)
{
ChipLogDetail(CASESessionManager, "FindOrEstablishSession: No existing OperationalSessionSetup instance found");
session = mConfig.sessionSetupPool->Allocate(mConfig.sessionInitParams, mConfig.clientPool, peerId, this);
if (session == nullptr)
{
if (onFailure != nullptr)
{
onFailure->mCall(onFailure->mContext, peerId, CHIP_ERROR_NO_MEMORY);
}
if (onSetupFailure != nullptr)
{
OperationalSessionSetup::ConnectionFailureInfo failureInfo(peerId, CHIP_ERROR_NO_MEMORY,
SessionEstablishmentStage::kUnknown);
onSetupFailure->mCall(onSetupFailure->mContext, failureInfo);
}
return;
}
}
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
session->UpdateAttemptCount(attemptCount);
if (onRetry)
{
session->AddRetryHandler(onRetry);
}
#endif // CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
if (onFailure != nullptr)
{
session->Connect(onConnection, onFailure, transportPayloadCapability);
}
if (onSetupFailure != nullptr)
{
session->Connect(onConnection, onSetupFailure, transportPayloadCapability);
}
}
void CASESessionManager::ReleaseSessionsForFabric(FabricIndex fabricIndex)
{
mConfig.sessionSetupPool->ReleaseAllSessionSetupsForFabric(fabricIndex);
}
void CASESessionManager::ReleaseAllSessions()
{
mConfig.sessionSetupPool->ReleaseAllSessionSetup();
}
CHIP_ERROR CASESessionManager::GetPeerAddress(const ScopedNodeId & peerId, Transport::PeerAddress & addr,
TransportPayloadCapability transportPayloadCapability)
{
ReturnErrorOnFailure(mConfig.sessionInitParams.Validate());
auto optionalSessionHandle = FindExistingSession(peerId, transportPayloadCapability);
ReturnErrorCodeIf(!optionalSessionHandle.HasValue(), CHIP_ERROR_NOT_CONNECTED);
addr = optionalSessionHandle.Value()->AsSecureSession()->GetPeerAddress();
return CHIP_NO_ERROR;
}
void CASESessionManager::UpdatePeerAddress(ScopedNodeId peerId)
{
bool forAddressUpdate = true;
OperationalSessionSetup * session = FindExistingSessionSetup(peerId, forAddressUpdate);
if (session == nullptr)
{
ChipLogDetail(CASESessionManager, "UpdatePeerAddress: No existing OperationalSessionSetup instance found");
session = mConfig.sessionSetupPool->Allocate(mConfig.sessionInitParams, mConfig.clientPool, peerId, this);
if (session == nullptr)
{
ChipLogDetail(CASESessionManager, "UpdatePeerAddress: Failed to allocate OperationalSessionSetup instance");
return;
}
}
else
{
ChipLogDetail(CASESessionManager,
"UpdatePeerAddress: Found existing OperationalSessionSetup instance for peerId[" ChipLogFormatX64 "]",
ChipLogValueX64(peerId.GetNodeId()));
}
session->PerformAddressUpdate();
}
OperationalSessionSetup * CASESessionManager::FindExistingSessionSetup(const ScopedNodeId & peerId, bool forAddressUpdate) const
{
return mConfig.sessionSetupPool->FindSessionSetup(peerId, forAddressUpdate);
}
Optional<SessionHandle> CASESessionManager::FindExistingSession(const ScopedNodeId & peerId,
const TransportPayloadCapability transportPayloadCapability) const
{
return mConfig.sessionInitParams.sessionManager->FindSecureSessionForNode(
peerId, MakeOptional(Transport::SecureSession::Type::kCASE), transportPayloadCapability);
}
void CASESessionManager::ReleaseSession(OperationalSessionSetup * session)
{
if (session != nullptr)
{
mConfig.sessionSetupPool->Release(session);
}
}
} // namespace chip

View File

@@ -0,0 +1,184 @@
/*
*
* Copyright (c) 2020-2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/CASEClientPool.h>
#include <app/OperationalSessionSetup.h>
#include <app/OperationalSessionSetupPool.h>
#include <lib/core/CHIPConfig.h>
#include <lib/core/CHIPCore.h>
#include <lib/support/Pool.h>
#include <platform/CHIPDeviceLayer.h>
#include <transport/SessionDelegate.h>
#include <transport/SessionManager.h>
#include <transport/SessionUpdateDelegate.h>
namespace chip {
struct CASESessionManagerConfig
{
CASEClientInitParams sessionInitParams;
CASEClientPoolDelegate * clientPool = nullptr;
OperationalSessionSetupPoolDelegate * sessionSetupPool = nullptr;
};
/**
 * This class provides the following:
 * 1. Manages a pool of operational device proxy objects for peer nodes that have an active message exchange with the local node.
 * 2. The pool contains at most one device proxy object for a given peer node.
 * 3. An API to look up an existing proxy object, or allocate a new one by triggering session establishment with the peer node.
 * 4. During session establishment, triggers node ID resolution (if needed) and updates the DNS-SD cache (if resolution is
 *    successful).
*/
class CASESessionManager : public OperationalSessionReleaseDelegate, public SessionUpdateDelegate
{
public:
CASESessionManager() = default;
virtual ~CASESessionManager()
{
if (mConfig.sessionInitParams.Validate() == CHIP_NO_ERROR)
{
mConfig.sessionInitParams.exchangeMgr->GetReliableMessageMgr()->RegisterSessionUpdateDelegate(nullptr);
}
}
CHIP_ERROR Init(chip::System::Layer * systemLayer, const CASESessionManagerConfig & params);
void Shutdown();
/**
* Find an existing session for the given node ID, or trigger a new session
* request.
*
* The caller can optionally provide `onConnection` and `onFailure` callback
* objects. If provided, these will be used to inform the caller about
* successful or failed connection establishment.
*
* If the connection is already established, the `onConnection` callback
* will be immediately called, before FindOrEstablishSession returns.
*
* The `onFailure` callback may be called before the FindOrEstablishSession
* call returns, for error cases that are detected synchronously.
*
* attemptCount can be used to automatically retry multiple times if session
* setup is not successful.
*/
void FindOrEstablishSession(const ScopedNodeId & peerId, Callback::Callback<OnDeviceConnected> * onConnection,
Callback::Callback<OnDeviceConnectionFailure> * onFailure,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
uint8_t attemptCount = 1, Callback::Callback<OnDeviceConnectionRetry> * onRetry = nullptr,
#endif // CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
TransportPayloadCapability transportPayloadCapability = TransportPayloadCapability::kMRPPayload);
/**
* Find an existing session for the given node ID or trigger a new session request.
*
* The caller can optionally provide `onConnection` and `onSetupFailure`
* callback objects. If provided, these will be used to inform the caller about successful or
* failed connection establishment.
*
* If the connection is already established, the `onConnection` callback will be immediately called,
* before `FindOrEstablishSession` returns.
*
* The `onSetupFailure` callback may be called before the `FindOrEstablishSession`
* call returns, for error cases that are detected synchronously.
*
* The `attemptCount` parameter can be used to automatically retry multiple times if session setup is
* not successful.
*
* @param peerId The node ID to find or establish a session with.
* @param onConnection A callback to be called upon successful connection establishment.
* @param onSetupFailure A callback to be called upon an extended device connection failure.
* @param attemptCount The number of retry attempts if session setup fails (default is 1).
* @param onRetry A callback to be called on a retry attempt (enabled by a config flag).
* @param transportPayloadCapability An indicator of what payload types the session needs to be able to transport.
*/
void FindOrEstablishSession(const ScopedNodeId & peerId, Callback::Callback<OnDeviceConnected> * onConnection,
Callback::Callback<OperationalSessionSetup::OnSetupFailure> * onSetupFailure,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
uint8_t attemptCount = 1, Callback::Callback<OnDeviceConnectionRetry> * onRetry = nullptr,
#endif // CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
TransportPayloadCapability transportPayloadCapability = TransportPayloadCapability::kMRPPayload);
/**
* Find an existing session for the given node ID or trigger a new session request.
*
* The caller can optionally provide `onConnection`
* callback objects. If provided, these will be used to inform the caller about successful connection establishment.
*
* If the connection is already established, the `onConnection` callback will be immediately called,
* before `FindOrEstablishSession` returns.
*
* The `attemptCount` parameter can be used to automatically retry multiple times if session setup is
* not successful.
*
     * This overload allows calls that pass 'nullptr' for the error handler to compile, which is useful in scenarios where error
     * handling is not needed.
*
* @param peerId The node ID to find or establish a session with.
* @param onConnection A callback to be called upon successful connection establishment.
* @param attemptCount The number of retry attempts if session setup fails (default is 1).
* @param onRetry A callback to be called on a retry attempt (enabled by a config flag).
* @param transportPayloadCapability An indicator of what payload types the session needs to be able to transport.
*/
void FindOrEstablishSession(const ScopedNodeId & peerId, Callback::Callback<OnDeviceConnected> * onConnection, std::nullptr_t,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
uint8_t attemptCount = 1, Callback::Callback<OnDeviceConnectionRetry> * onRetry = nullptr,
#endif // CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
TransportPayloadCapability transportPayloadCapability = TransportPayloadCapability::kMRPPayload);
void ReleaseSessionsForFabric(FabricIndex fabricIndex);
void ReleaseAllSessions();
/**
* This API returns the address for the given node ID.
* If the CASESessionManager is configured with a DNS-SD cache, the cache is looked up
* for the node ID.
* If the DNS-SD cache is not available, the CASESessionManager looks up the list for
     * an ongoing session with the peer node. If no such session exists, the API returns
     * `CHIP_ERROR_NOT_CONNECTED`.
*/
CHIP_ERROR GetPeerAddress(const ScopedNodeId & peerId, Transport::PeerAddress & addr,
TransportPayloadCapability transportPayloadCapability = TransportPayloadCapability::kMRPPayload);
//////////// OperationalSessionReleaseDelegate Implementation ///////////////
void ReleaseSession(OperationalSessionSetup * device) override;
//////////// SessionUpdateDelegate Implementation ///////////////
void UpdatePeerAddress(ScopedNodeId peerId) override;
private:
OperationalSessionSetup * FindExistingSessionSetup(const ScopedNodeId & peerId, bool forAddressUpdate = false) const;
Optional<SessionHandle> FindExistingSession(
const ScopedNodeId & peerId,
const TransportPayloadCapability transportPayloadCapability = TransportPayloadCapability::kMRPPayload) const;
void FindOrEstablishSessionHelper(const ScopedNodeId & peerId, Callback::Callback<OnDeviceConnected> * onConnection,
Callback::Callback<OnDeviceConnectionFailure> * onFailure,
Callback::Callback<OperationalSessionSetup::OnSetupFailure> * onSetupFailure,
#if CHIP_DEVICE_CONFIG_ENABLE_AUTOMATIC_CASE_RETRIES
uint8_t attemptCount, Callback::Callback<OnDeviceConnectionRetry> * onRetry,
#endif
TransportPayloadCapability transportPayloadCapability);
CASESessionManagerConfig mConfig;
};
} // namespace chip
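// Illustrative sketch (not part of the imported file): requesting a CASE session with
// connection callbacks. The handler signatures shown here are assumptions based on the
// OnDeviceConnected / OnDeviceConnectionFailure callback types; 'caseSessionManager' and
// 'peerId' are hypothetical.
//
//   static void OnConnected(void * context, chip::Messaging::ExchangeManager & exchangeMgr,
//                           const chip::SessionHandle & sessionHandle) { /* use the session */ }
//   static void OnConnectionFailure(void * context, const chip::ScopedNodeId & peerId, CHIP_ERROR error) { /* handle it */ }
//
//   chip::Callback::Callback<chip::OnDeviceConnected> onConnected(OnConnected, nullptr);
//   chip::Callback::Callback<chip::OnDeviceConnectionFailure> onFailure(OnConnectionFailure, nullptr);
//   caseSessionManager.FindOrEstablishSession(peerId, &onConnected, &onFailure);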

View File

@@ -0,0 +1,92 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/ChunkedWriteCallback.h>
namespace chip {
namespace app {
void ChunkedWriteCallback::OnResponse(const WriteClient * apWriteClient, const ConcreteDataAttributePath & aPath, StatusIB aStatus)
{
// We may send a chunked list. To make the behavior consistent whether a list is being chunked or not,
// we merge the write responses for a chunked list here and provide our consumer with a single status response.
if (mProcessingAttributePath.HasValue())
{
// This is not the first write response.
if (IsAppendingToLastItem(aPath))
{
// This is a response on the same path as what we already have stored. Report the first
// failure status we encountered, and ignore subsequent ones.
if (mAttributeStatus.IsSuccess())
{
mAttributeStatus = aStatus;
}
return;
}
        // This is a response to another attribute write. Report the final result of the last attribute write.
callback->OnResponse(apWriteClient, mProcessingAttributePath.Value(), mAttributeStatus);
}
// This is the first report for a new attribute. We assume it will never be a list item operation.
if (aPath.IsListItemOperation())
{
aStatus = StatusIB(CHIP_ERROR_INCORRECT_STATE);
}
mProcessingAttributePath.SetValue(aPath);
mAttributeStatus = aStatus;
// For the last status in the response, we will call the application callback in OnDone()
}
void ChunkedWriteCallback::OnError(const WriteClient * apWriteClient, CHIP_ERROR aError)
{
callback->OnError(apWriteClient, aError);
}
void ChunkedWriteCallback::OnDone(WriteClient * apWriteClient)
{
if (mProcessingAttributePath.HasValue())
{
// We have a cached status that has yet to be reported to the application so report it now.
        // If we failed to receive the response, or we received a malformed response, OnResponse won't be called and
        // mProcessingAttributePath will be missing, so this block is skipped in that case.
callback->OnResponse(apWriteClient, mProcessingAttributePath.Value(), mAttributeStatus);
}
mProcessingAttributePath = NullOptional;
mAttributeStatus = StatusIB();
callback->OnDone(apWriteClient);
}
bool ChunkedWriteCallback::IsAppendingToLastItem(const ConcreteDataAttributePath & aPath)
{
if (!aPath.IsListItemOperation())
{
return false;
}
if (!mProcessingAttributePath.HasValue() || !(mProcessingAttributePath.Value() == aPath))
{
return false;
}
return aPath.mListOp == ConcreteDataAttributePath::ListOperation::AppendItem;
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,55 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/WriteClient.h>
namespace chip {
namespace app {
/*
* This is an adapter that intercepts calls that deliver status codes from the WriteClient and
* selectively merges the status codes for a chunked list write as follows:
* - If the whole list was successfully written, callback->OnResponse will be called with success.
* - If any element in the list was not successfully written, callback->OnResponse will be called with the first error received.
* - callback->OnResponse will always have NotList as mListOp since we have merged the chunked responses.
* The merge logic assumes all list operations are part of list chunking.
*/
class ChunkedWriteCallback : public WriteClient::Callback
{
public:
ChunkedWriteCallback(WriteClient::Callback * apCallback) : callback(apCallback) {}
void OnResponse(const WriteClient * apWriteClient, const ConcreteDataAttributePath & aPath, StatusIB status) override;
void OnError(const WriteClient * apWriteClient, CHIP_ERROR aError) override;
void OnDone(WriteClient * apWriteClient) override;
private:
bool IsAppendingToLastItem(const ConcreteDataAttributePath & aPath);
// Because we cast between ConcreteAttributePath and ConcreteDataAttributePath, all paths passed to upper-layer
// applications will always have NotList as mListOp.
Optional<ConcreteAttributePath> mProcessingAttributePath;
StatusIB mAttributeStatus;
WriteClient::Callback * callback;
};
} // namespace app
} // namespace chip
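A minimal usage sketch (editorial illustration, not part of the imported files; the application callback name and the WriteClient construction are assumptions): the application keeps its own WriteClient::Callback and the adapter alive for the duration of the write, and hands the adapter, not its own callback, to the WriteClient so that chunked list responses arrive merged.
#include <app/ChunkedWriteCallback.h>
#include <app/WriteClient.h>
class MyWriteCallback : public chip::app::WriteClient::Callback
{
public:
    void OnResponse(const chip::app::WriteClient * apWriteClient, const chip::app::ConcreteDataAttributePath & aPath,
                    chip::app::StatusIB aStatus) override
    {
        // Called once per attribute; statuses for a chunked list have already been merged by the adapter.
    }
    void OnError(const chip::app::WriteClient * apWriteClient, CHIP_ERROR aError) override {}
    void OnDone(chip::app::WriteClient * apWriteClient) override {}
};
MyWriteCallback gAppCallback;
chip::app::ChunkedWriteCallback gChunkedCallback(&gAppCallback);
// When constructing the WriteClient (details omitted), pass &gChunkedCallback as its callback.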

@@ -0,0 +1,700 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "system/SystemPacketBuffer.h"
#include <app/ClusterStateCache.h>
#include <app/InteractionModelEngine.h>
#include <tuple>
namespace chip {
namespace app {
namespace {
// Determine how much space a StatusIB takes up on the wire.
uint32_t SizeOfStatusIB(const StatusIB & aStatus)
{
// 1 byte: anonymous tag control byte for struct.
// 1 byte: control byte for uint8 value.
// 1 byte: context-specific tag for uint8 value.
// 1 byte: the uint8 value.
// 1 byte: end of container.
uint32_t size = 5;
if (aStatus.mClusterStatus.HasValue())
{
// 1 byte: control byte for uint8 value.
// 1 byte: context-specific tag for uint8 value.
// 1 byte: the uint8 value.
size += 3;
}
return size;
}
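// Worked example of the accounting above: a StatusIB carrying only the general status encodes in 5 bytes,
// while one that also carries a cluster-specific status encodes in 5 + 3 = 8 bytes.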
} // anonymous namespace
template <bool CanEnableDataCaching>
CHIP_ERROR ClusterStateCacheT<CanEnableDataCaching>::GetElementTLVSize(TLV::TLVReader * apData, uint32_t & aSize)
{
Platform::ScopedMemoryBufferWithSize<uint8_t> backingBuffer;
TLV::TLVReader reader;
reader.Init(*apData);
size_t totalBufSize = reader.GetTotalLength();
backingBuffer.Calloc(totalBufSize);
VerifyOrReturnError(backingBuffer.Get() != nullptr, CHIP_ERROR_NO_MEMORY);
TLV::ScopedBufferTLVWriter writer(std::move(backingBuffer), totalBufSize);
ReturnErrorOnFailure(writer.CopyElement(TLV::AnonymousTag(), reader));
aSize = writer.GetLengthWritten();
ReturnErrorOnFailure(writer.Finalize(backingBuffer));
return CHIP_NO_ERROR;
}
template <bool CanEnableDataCaching>
CHIP_ERROR ClusterStateCacheT<CanEnableDataCaching>::UpdateCache(const ConcreteDataAttributePath & aPath, TLV::TLVReader * apData,
const StatusIB & aStatus)
{
AttributeState state;
bool endpointIsNew = false;
if (mCache.find(aPath.mEndpointId) == mCache.end())
{
//
// We might be creating a new entry at mCache[aPath.mEndpointId][aPath.mClusterId] that wasn't there
// before, so remember whether the endpoint already existed so that we can notify our clients of the
// addition of a new endpoint.
//
endpointIsNew = true;
}
if (apData)
{
uint32_t elementSize = 0;
ReturnErrorOnFailure(GetElementTLVSize(apData, elementSize));
if constexpr (CanEnableDataCaching)
{
if (mCacheData)
{
Platform::ScopedMemoryBufferWithSize<uint8_t> backingBuffer;
backingBuffer.Calloc(elementSize);
VerifyOrReturnError(backingBuffer.Get() != nullptr, CHIP_ERROR_NO_MEMORY);
TLV::ScopedBufferTLVWriter writer(std::move(backingBuffer), elementSize);
ReturnErrorOnFailure(writer.CopyElement(TLV::AnonymousTag(), *apData));
ReturnErrorOnFailure(writer.Finalize(backingBuffer));
state.template Set<AttributeData>(std::move(backingBuffer));
}
else
{
state.template Set<uint32_t>(elementSize);
}
}
else
{
state = elementSize;
}
//
// Clear out the committed data version and only set it again once we have received all data for this cluster.
// Otherwise, we may have incomplete data that looks like it's complete since it has a valid data version.
//
mCache[aPath.mEndpointId][aPath.mClusterId].mCommittedDataVersion.ClearValue();
// This commits a pending data version if the last report path is valid and it is different from the current path.
if (mLastReportDataPath.IsValidConcreteClusterPath() && mLastReportDataPath != aPath)
{
CommitPendingDataVersion();
}
bool foundEncompassingWildcardPath = false;
for (const auto & path : mRequestPathSet)
{
if (path.IncludesAllAttributesInCluster(aPath))
{
foundEncompassingWildcardPath = true;
break;
}
}
// if this data item is encompassed by a wildcard path, let's go ahead and update its pending data version.
if (foundEncompassingWildcardPath)
{
mCache[aPath.mEndpointId][aPath.mClusterId].mPendingDataVersion = aPath.mDataVersion;
}
mLastReportDataPath = aPath;
}
else
{
if constexpr (CanEnableDataCaching)
{
if (mCacheData)
{
state.template Set<StatusIB>(aStatus);
}
else
{
state.template Set<uint32_t>(SizeOfStatusIB(aStatus));
}
}
else
{
state = SizeOfStatusIB(aStatus);
}
}
//
// if the endpoint didn't exist previously, let's track the insertion
// so that we can inform our callback of a new endpoint being added appropriately.
//
if (endpointIsNew)
{
mAddedEndpoints.push_back(aPath.mEndpointId);
}
mCache[aPath.mEndpointId][aPath.mClusterId].mAttributes[aPath.mAttributeId] = std::move(state);
if (mCacheData)
{
mChangedAttributeSet.insert(aPath);
}
return CHIP_NO_ERROR;
}
template <bool CanEnableDataCaching>
CHIP_ERROR ClusterStateCacheT<CanEnableDataCaching>::UpdateEventCache(const EventHeader & aEventHeader, TLV::TLVReader * apData,
const StatusIB * apStatus)
{
if (apData)
{
//
// If we've already seen this event before, there's no more work to be done.
//
if (mHighestReceivedEventNumber.HasValue() && aEventHeader.mEventNumber <= mHighestReceivedEventNumber.Value())
{
return CHIP_NO_ERROR;
}
if (mCacheData)
{
System::PacketBufferHandle handle = System::PacketBufferHandle::New(chip::app::kMaxSecureSduLengthBytes);
VerifyOrReturnError(!handle.IsNull(), CHIP_ERROR_NO_MEMORY);
System::PacketBufferTLVWriter writer;
writer.Init(std::move(handle), false);
ReturnErrorOnFailure(writer.CopyElement(TLV::AnonymousTag(), *apData));
ReturnErrorOnFailure(writer.Finalize(&handle));
//
// Compact the buffer down to a more reasonably sized packet buffer
// if we can.
//
handle.RightSize();
EventData eventData;
eventData.first = aEventHeader;
eventData.second = std::move(handle);
mEventDataCache.insert(std::move(eventData));
}
mHighestReceivedEventNumber.SetValue(aEventHeader.mEventNumber);
}
else if (apStatus)
{
if (mCacheData)
{
mEventStatusCache[aEventHeader.mPath] = *apStatus;
}
}
return CHIP_NO_ERROR;
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::OnReportBegin()
{
mLastReportDataPath = ConcreteClusterPath(kInvalidEndpointId, kInvalidClusterId);
mChangedAttributeSet.clear();
mAddedEndpoints.clear();
mCallback.OnReportBegin();
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::CommitPendingDataVersion()
{
if (!mLastReportDataPath.IsValidConcreteClusterPath())
{
return;
}
auto & lastClusterInfo = mCache[mLastReportDataPath.mEndpointId][mLastReportDataPath.mClusterId];
if (lastClusterInfo.mPendingDataVersion.HasValue())
{
lastClusterInfo.mCommittedDataVersion = lastClusterInfo.mPendingDataVersion;
lastClusterInfo.mPendingDataVersion.ClearValue();
}
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::OnReportEnd()
{
CommitPendingDataVersion();
mLastReportDataPath = ConcreteClusterPath(kInvalidEndpointId, kInvalidClusterId);
std::set<std::tuple<EndpointId, ClusterId>> changedClusters;
//
// Add the EndpointId and ClusterId into a set so that we only
// convey unique combinations in the subsequent OnClusterChanged callback.
//
for (auto & path : mChangedAttributeSet)
{
mCallback.OnAttributeChanged(this, path);
changedClusters.insert(std::make_tuple(path.mEndpointId, path.mClusterId));
}
for (auto & item : changedClusters)
{
mCallback.OnClusterChanged(this, std::get<0>(item), std::get<1>(item));
}
for (auto endpoint : mAddedEndpoints)
{
mCallback.OnEndpointAdded(this, endpoint);
}
mCallback.OnReportEnd();
}
template <>
CHIP_ERROR ClusterStateCacheT<true>::Get(const ConcreteAttributePath & path, TLV::TLVReader & reader) const
{
CHIP_ERROR err;
auto attributeState = GetAttributeState(path.mEndpointId, path.mClusterId, path.mAttributeId, err);
ReturnErrorOnFailure(err);
if (attributeState->template Is<StatusIB>())
{
return CHIP_ERROR_IM_STATUS_CODE_RECEIVED;
}
if (!attributeState->template Is<AttributeData>())
{
return CHIP_ERROR_KEY_NOT_FOUND;
}
reader.Init(attributeState->template Get<AttributeData>().Get(), attributeState->template Get<AttributeData>().AllocatedSize());
return reader.Next();
}
template <>
CHIP_ERROR ClusterStateCacheT<false>::Get(const ConcreteAttributePath & path, TLV::TLVReader & reader) const
{
return CHIP_ERROR_KEY_NOT_FOUND;
}
template <bool CanEnableDataCaching>
CHIP_ERROR ClusterStateCacheT<CanEnableDataCaching>::Get(EventNumber eventNumber, TLV::TLVReader & reader) const
{
CHIP_ERROR err;
auto eventData = GetEventData(eventNumber, err);
ReturnErrorOnFailure(err);
System::PacketBufferTLVReader bufReader;
bufReader.Init(eventData->second.Retain());
ReturnErrorOnFailure(bufReader.Next());
reader.Init(bufReader);
return CHIP_NO_ERROR;
}
template <bool CanEnableDataCaching>
const typename ClusterStateCacheT<CanEnableDataCaching>::EndpointState *
ClusterStateCacheT<CanEnableDataCaching>::GetEndpointState(EndpointId endpointId, CHIP_ERROR & err) const
{
auto endpointIter = mCache.find(endpointId);
if (endpointIter == mCache.end())
{
err = CHIP_ERROR_KEY_NOT_FOUND;
return nullptr;
}
err = CHIP_NO_ERROR;
return &endpointIter->second;
}
template <bool CanEnableDataCaching>
const typename ClusterStateCacheT<CanEnableDataCaching>::ClusterState *
ClusterStateCacheT<CanEnableDataCaching>::GetClusterState(EndpointId endpointId, ClusterId clusterId, CHIP_ERROR & err) const
{
auto endpointState = GetEndpointState(endpointId, err);
if (err != CHIP_NO_ERROR)
{
return nullptr;
}
auto clusterState = endpointState->find(clusterId);
if (clusterState == endpointState->end())
{
err = CHIP_ERROR_KEY_NOT_FOUND;
return nullptr;
}
err = CHIP_NO_ERROR;
return &clusterState->second;
}
template <bool CanEnableDataCaching>
const typename ClusterStateCacheT<CanEnableDataCaching>::AttributeState *
ClusterStateCacheT<CanEnableDataCaching>::GetAttributeState(EndpointId endpointId, ClusterId clusterId, AttributeId attributeId,
CHIP_ERROR & err) const
{
auto clusterState = GetClusterState(endpointId, clusterId, err);
if (err != CHIP_NO_ERROR)
{
return nullptr;
}
auto attributeState = clusterState->mAttributes.find(attributeId);
if (attributeState == clusterState->mAttributes.end())
{
err = CHIP_ERROR_KEY_NOT_FOUND;
return nullptr;
}
err = CHIP_NO_ERROR;
return &attributeState->second;
}
template <bool CanEnableDataCaching>
const typename ClusterStateCacheT<CanEnableDataCaching>::EventData *
ClusterStateCacheT<CanEnableDataCaching>::GetEventData(EventNumber eventNumber, CHIP_ERROR & err) const
{
EventData compareKey;
compareKey.first.mEventNumber = eventNumber;
auto eventData = mEventDataCache.find(std::move(compareKey));
if (eventData == mEventDataCache.end())
{
err = CHIP_ERROR_KEY_NOT_FOUND;
return nullptr;
}
err = CHIP_NO_ERROR;
return &(*eventData);
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::OnAttributeData(const ConcreteDataAttributePath & aPath, TLV::TLVReader * apData,
const StatusIB & aStatus)
{
//
// Since the cache itself is a ReadClient::Callback, it may be incorrectly passed in directly when registering with the
// ReadClient. This should be avoided, since that bypasses the built-in buffered reader adapter callback that is needed for
// lists to work correctly.
//
// Instead, the right callback should be retrieved using GetBufferedCallback().
//
// To catch such errors, we validate that the provided concrete path never indicates a raw list item operation (which the
// buffered reader will handle and convert for us).
//
VerifyOrDie(!aPath.IsListItemOperation());
// Copy the reader for forwarding
TLV::TLVReader dataSnapshot;
if (apData)
{
dataSnapshot.Init(*apData);
}
UpdateCache(aPath, apData, aStatus);
//
// Forward the call through.
//
mCallback.OnAttributeData(aPath, apData ? &dataSnapshot : nullptr, aStatus);
}
template <bool CanEnableDataCaching>
CHIP_ERROR ClusterStateCacheT<CanEnableDataCaching>::GetVersion(const ConcreteClusterPath & aPath,
Optional<DataVersion> & aVersion) const
{
VerifyOrReturnError(aPath.IsValidConcreteClusterPath(), CHIP_ERROR_INVALID_ARGUMENT);
CHIP_ERROR err;
auto clusterState = GetClusterState(aPath.mEndpointId, aPath.mClusterId, err);
ReturnErrorOnFailure(err);
aVersion = clusterState->mCommittedDataVersion;
return CHIP_NO_ERROR;
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::OnEventData(const EventHeader & aEventHeader, TLV::TLVReader * apData,
const StatusIB * apStatus)
{
VerifyOrDie(apData != nullptr || apStatus != nullptr);
TLV::TLVReader dataSnapshot;
if (apData)
{
dataSnapshot.Init(*apData);
}
UpdateEventCache(aEventHeader, apData, apStatus);
mCallback.OnEventData(aEventHeader, apData ? &dataSnapshot : nullptr, apStatus);
}
template <>
CHIP_ERROR ClusterStateCacheT<true>::GetStatus(const ConcreteAttributePath & path, StatusIB & status) const
{
CHIP_ERROR err;
auto attributeState = GetAttributeState(path.mEndpointId, path.mClusterId, path.mAttributeId, err);
ReturnErrorOnFailure(err);
if (!attributeState->template Is<StatusIB>())
{
return CHIP_ERROR_INVALID_ARGUMENT;
}
status = attributeState->template Get<StatusIB>();
return CHIP_NO_ERROR;
}
template <>
CHIP_ERROR ClusterStateCacheT<false>::GetStatus(const ConcreteAttributePath & path, StatusIB & status) const
{
return CHIP_ERROR_INVALID_ARGUMENT;
}
template <bool CanEnableDataCaching>
CHIP_ERROR ClusterStateCacheT<CanEnableDataCaching>::GetStatus(const ConcreteEventPath & path, StatusIB & status) const
{
auto statusIter = mEventStatusCache.find(path);
if (statusIter == mEventStatusCache.end())
{
return CHIP_ERROR_KEY_NOT_FOUND;
}
status = statusIter->second;
return CHIP_NO_ERROR;
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::GetSortedFilters(std::vector<std::pair<DataVersionFilter, size_t>> & aVector) const
{
for (auto const & endpointIter : mCache)
{
EndpointId endpointId = endpointIter.first;
for (auto const & clusterIter : endpointIter.second)
{
if (!clusterIter.second.mCommittedDataVersion.HasValue())
{
continue;
}
DataVersion dataVersion = clusterIter.second.mCommittedDataVersion.Value();
size_t clusterSize = 0;
ClusterId clusterId = clusterIter.first;
for (auto const & attributeIter : clusterIter.second.mAttributes)
{
if constexpr (CanEnableDataCaching)
{
if (attributeIter.second.template Is<StatusIB>())
{
clusterSize += SizeOfStatusIB(attributeIter.second.template Get<StatusIB>());
}
else if (attributeIter.second.template Is<uint32_t>())
{
clusterSize += attributeIter.second.template Get<uint32_t>();
}
else
{
VerifyOrDie(attributeIter.second.template Is<AttributeData>());
TLV::TLVReader bufReader;
bufReader.Init(attributeIter.second.template Get<AttributeData>().Get(),
attributeIter.second.template Get<AttributeData>().AllocatedSize());
ReturnOnFailure(bufReader.Next());
// Skip to the end of the element.
ReturnOnFailure(bufReader.Skip());
// Compute the amount of value data
clusterSize += bufReader.GetLengthRead();
}
}
else
{
clusterSize += attributeIter.second;
}
}
if (clusterSize == 0)
{
// No data in this cluster, so no point in sending a dataVersion
// along at all.
continue;
}
DataVersionFilter filter(endpointId, clusterId, dataVersion);
aVector.push_back(std::make_pair(filter, clusterSize));
}
}
std::sort(aVector.begin(), aVector.end(),
[](const std::pair<DataVersionFilter, size_t> & x, const std::pair<DataVersionFilter, size_t> & y) {
return x.second > y.second;
});
}
template <bool CanEnableDataCaching>
CHIP_ERROR ClusterStateCacheT<CanEnableDataCaching>::OnUpdateDataVersionFilterList(
DataVersionFilterIBs::Builder & aDataVersionFilterIBsBuilder, const Span<AttributePathParams> & aAttributePaths,
bool & aEncodedDataVersionList)
{
CHIP_ERROR err = CHIP_NO_ERROR;
TLV::TLVWriter backup;
// Only put paths into mRequestPathSet if they cover clusters in their entirety and no other path in our path list
// points to a specific attribute from any of those clusters.
// This helps avoid data-out-of-sync issues when storing data versions, for example with the two paths
// (E1, C1, wildcard-attribute) and (wildcard-endpoint, C1, A1).
for (auto & attribute1 : aAttributePaths)
{
if (attribute1.HasWildcardAttributeId())
{
bool intersected = false;
for (auto & attribute2 : aAttributePaths)
{
if (attribute2.HasWildcardAttributeId())
{
continue;
}
if (attribute1.Intersects(attribute2))
{
intersected = true;
break;
}
}
if (!intersected)
{
mRequestPathSet.insert(attribute1);
}
}
}
std::vector<std::pair<DataVersionFilter, size_t>> filterVector;
GetSortedFilters(filterVector);
aEncodedDataVersionList = false;
for (auto & filter : filterVector)
{
bool intersected = false;
aDataVersionFilterIBsBuilder.Checkpoint(backup);
// if the particular cached cluster does not intersect with user provided attribute paths, skip the cached one
for (const auto & attributePath : aAttributePaths)
{
if (attributePath.IncludesAttributesInCluster(filter.first))
{
intersected = true;
break;
}
}
if (!intersected)
{
continue;
}
SuccessOrExit(err = aDataVersionFilterIBsBuilder.EncodeDataVersionFilterIB(filter.first));
aEncodedDataVersionList = true;
}
exit:
if (err == CHIP_ERROR_NO_MEMORY || err == CHIP_ERROR_BUFFER_TOO_SMALL)
{
ChipLogProgress(DataManagement, "OnUpdateDataVersionFilterList out of space; rolling back");
aDataVersionFilterIBsBuilder.Rollback(backup);
err = CHIP_NO_ERROR;
}
return err;
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::ClearAttributes(EndpointId endpointId)
{
mCache.erase(endpointId);
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::ClearAttributes(const ConcreteClusterPath & cluster)
{
// Can't use GetEndpointState here, since that only handles const things.
auto endpointIter = mCache.find(cluster.mEndpointId);
if (endpointIter == mCache.end())
{
return;
}
auto & endpointState = endpointIter->second;
endpointState.erase(cluster.mClusterId);
}
template <bool CanEnableDataCaching>
void ClusterStateCacheT<CanEnableDataCaching>::ClearAttribute(const ConcreteAttributePath & attribute)
{
// Can't use GetClusterState here, since that only handles const things.
auto endpointIter = mCache.find(attribute.mEndpointId);
if (endpointIter == mCache.end())
{
return;
}
auto & endpointState = endpointIter->second;
auto clusterIter = endpointState.find(attribute.mClusterId);
if (clusterIter == endpointState.end())
{
return;
}
auto & clusterState = clusterIter->second;
clusterState.mAttributes.erase(attribute.mAttributeId);
}
template <bool CanEnableDataCaching>
CHIP_ERROR ClusterStateCacheT<CanEnableDataCaching>::GetLastReportDataPath(ConcreteClusterPath & aPath)
{
if (mLastReportDataPath.IsValidConcreteClusterPath())
{
aPath = mLastReportDataPath;
return CHIP_NO_ERROR;
}
return CHIP_ERROR_INCORRECT_STATE;
}
// Ensure that our out-of-line template methods actually get compiled.
template class ClusterStateCacheT<true>;
template class ClusterStateCacheT<false>;
} // namespace app
} // namespace chip

@@ -0,0 +1,709 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "lib/core/CHIPError.h"
#include "system/SystemPacketBuffer.h"
#include "system/TLVPacketBufferBackingStore.h"
#include <app/AppConfig.h>
#include <app/AttributePathParams.h>
#include <app/BufferedReadCallback.h>
#include <app/ReadClient.h>
#include <app/data-model/DecodableList.h>
#include <app/data-model/Decode.h>
#include <lib/support/Variant.h>
#include <list>
#include <map>
#include <queue>
#include <set>
#include <vector>
#if CHIP_CONFIG_ENABLE_READ_CLIENT
namespace chip {
namespace app {
/*
* This implements a cluster state cache designed to aggregate both attribute and event data received by a client
* from either read or subscribe interactions and keep it resident and available for clients to
* query at any time while the cache is active.
*
* The cache can be used with either read/subscribe, with the consumer connecting it up appropriately
* to the right ReadClient instance.
*
* The cache provides an up-to-date and consistent view of the state of a target node, with the scope of the
* state being determined by the associated ReadClient's path set.
*
* The cache provides a number of getters and helper functions to iterate over the topology
* of the received data which is organized by endpoint, cluster and attribute ID (for attributes). These permit greater
* flexibility when dealing with interactions that use wildcards heavily.
*
* For events, functions that permit iteration over the cached events sorted by event number are provided.
*
* The data is stored internally in the cache as TLV. This permits re-use of the existing cluster objects
* to de-serialize the state on-demand.
*
* The cache serves as a callback adapter as well in that it 'forwards' the ReadClient::Callback calls transparently
* through to a registered callback. In addition, it provides its own enhancements to the base ReadClient::Callback
* to make it easier to know what has changed in the cache.
*
* **NOTE**
* 1. This already includes the BufferedReadCallback, so there is no need to add that to the ReadClient callback chain.
* 2. The same cache cannot be used by multiple subscribe/read interactions at the same time.
*
*/
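/*
* Illustrative usage sketch (editorial addition; the listener name and the ReadClient wiring are
* assumptions, and construction of the ReadClient itself is omitted). The two key points are that the
* cache's own Callback is subclassed for change notifications, and that GetBufferedCallback() -- not the
* cache object itself -- is registered with the ReadClient:
*
*   class MyCacheListener : public chip::app::ClusterStateCache::Callback
*   {
*   public:
*       void OnAttributeChanged(chip::app::ClusterStateCache * cache, const chip::app::ConcreteAttributePath & path) override
*       {
*           // React to the change, e.g. decode the new value via cache->Get<...>(path, value).
*       }
*       void OnDone(chip::app::ReadClient * apReadClient) override {}
*       void OnError(CHIP_ERROR aError) override {}
*   };
*
*   MyCacheListener gListener;
*   chip::app::ClusterStateCache gCache(gListener);
*   // When creating the ReadClient for the read/subscribe interaction, pass gCache.GetBufferedCallback()
*   // as its callback so that list chunking is handled before data reaches the cache.
*/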
template <bool CanEnableDataCaching>
class ClusterStateCacheT : protected ReadClient::Callback
{
public:
class Callback : public ReadClient::Callback
{
public:
Callback() = default;
// Callbacks are not expected to be copyable or movable.
Callback(const Callback &) = delete;
Callback(Callback &&) = delete;
Callback & operator=(const Callback &) = delete;
Callback & operator=(Callback &&) = delete;
/*
* Called anytime an attribute value has changed in the cache
*/
virtual void OnAttributeChanged(ClusterStateCacheT * cache, const ConcreteAttributePath & path){};
/*
* Called anytime any attribute in a cluster has changed in the cache
*/
virtual void OnClusterChanged(ClusterStateCacheT * cache, EndpointId endpointId, ClusterId clusterId){};
/*
* Called anytime an endpoint was added to the cache
*/
virtual void OnEndpointAdded(ClusterStateCacheT * cache, EndpointId endpointId){};
};
/**
*
* @param [in] callback the derived callback which inherit from ReadClient::Callback
* @param [in] highestReceivedEventNumber optional highest received event number; events with a number
* less than or equal to this value are skipped rather than added to the cache
*/
ClusterStateCacheT(Callback & callback, Optional<EventNumber> highestReceivedEventNumber = Optional<EventNumber>::Missing()) :
mCallback(callback), mBufferedReader(*this)
{
mHighestReceivedEventNumber = highestReceivedEventNumber;
}
template <bool DataCachingEnabled = CanEnableDataCaching, std::enable_if_t<DataCachingEnabled, bool> = true>
ClusterStateCacheT(Callback & callback, Optional<EventNumber> highestReceivedEventNumber = Optional<EventNumber>::Missing(),
bool cacheData = true) :
mCallback(callback),
mBufferedReader(*this), mCacheData(cacheData)
{
mHighestReceivedEventNumber = highestReceivedEventNumber;
}
ClusterStateCacheT(const ClusterStateCacheT &) = delete;
ClusterStateCacheT(ClusterStateCacheT &&) = delete;
ClusterStateCacheT & operator=(const ClusterStateCacheT &) = delete;
ClusterStateCacheT & operator=(ClusterStateCacheT &&) = delete;
void SetHighestReceivedEventNumber(EventNumber highestReceivedEventNumber)
{
mHighestReceivedEventNumber.SetValue(highestReceivedEventNumber);
}
/*
* When registering as a callback to the ReadClient, the ClusterStateCache must not be passed as a callback
* directly. Instead, utilize this method below to correctly set up the callback chain such that
* the buffered reader is the first callback in the chain before calling into cache subsequently.
*/
ReadClient::Callback & GetBufferedCallback() { return mBufferedReader; }
/*
* Retrieve the value of an attribute from the cache (if present) given a concrete path by decoding
* it using DataModel::Decode into the in-out argument 'value'.
*
* For some types of attributes, the value for the attribute is directly backed by the underlying TLV buffer
* and has pointers into that buffer. (e.g octet strings, char strings and lists). This buffer only remains
* valid until the cached value for that path is updated, so it must not be held
* across any async call boundaries.
*
* The template parameter AttributeObjectTypeT is generally expected to be a
* ClusterName::Attributes::AttributeName::DecodableType, but any
* object that can be decoded using the DataModel::Decode machinery will work.
*
* Notable return values:
* - If the provided attribute object's Cluster and Attribute IDs don't match that of the provided path,
* a CHIP_ERROR_SCHEMA_MISMATCH shall be returned.
*
* - If neither data nor status for the specified path exists in the cache, CHIP_ERROR_KEY_NOT_FOUND
* shall be returned.
*
* - If a StatusIB is present in the cache instead of data, a CHIP_ERROR_IM_STATUS_CODE_RECEIVED error
* shall be returned from this call instead. The actual StatusIB can be retrieved using the GetStatus() API below.
*
*/
template <typename AttributeObjectTypeT>
CHIP_ERROR Get(const ConcreteAttributePath & path, typename AttributeObjectTypeT::DecodableType & value) const
{
TLV::TLVReader reader;
if (path.mClusterId != AttributeObjectTypeT::GetClusterId() || path.mAttributeId != AttributeObjectTypeT::GetAttributeId())
{
return CHIP_ERROR_SCHEMA_MISMATCH;
}
ReturnErrorOnFailure(Get(path, reader));
return DataModel::Decode(reader, value);
}
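/*
* Example (illustrative; assumes the generated cluster objects, e.g. Clusters::OnOff::Attributes::OnOff::TypeInfo,
* and an endpoint id kEndpoint chosen by the caller):
*
*   chip::app::Clusters::OnOff::Attributes::OnOff::TypeInfo::DecodableType isOn;
*   chip::app::ConcreteAttributePath path(kEndpoint, chip::app::Clusters::OnOff::Id,
*                                         chip::app::Clusters::OnOff::Attributes::OnOff::Id);
*   CHIP_ERROR err = cache.Get<chip::app::Clusters::OnOff::Attributes::OnOff::TypeInfo>(path, isOn);
*/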
/**
* Get the value of a particular attribute for the given endpoint. See the
* documentation for Get() with a ConcreteAttributePath above.
*/
template <typename AttributeObjectTypeT>
CHIP_ERROR Get(EndpointId endpoint, typename AttributeObjectTypeT::DecodableType & value) const
{
ConcreteAttributePath path(endpoint, AttributeObjectTypeT::GetClusterId(), AttributeObjectTypeT::GetAttributeId());
return Get<AttributeObjectTypeT>(path, value);
}
/*
* Retrieve the StatusIB for a given attribute if one exists currently in the cache.
*
* Notable return values:
* - If neither data nor status for the specified path exists in the cache, CHIP_ERROR_KEY_NOT_FOUND
* shall be returned.
*
* - If data exists in the cache instead of status, CHIP_ERROR_INVALID_ARGUMENT shall be returned.
*
*/
CHIP_ERROR GetStatus(const ConcreteAttributePath & path, StatusIB & status) const;
/*
* Encapsulates a StatusIB and a ConcreteAttributePath pair.
*/
struct AttributeStatus
{
AttributeStatus(const ConcreteAttributePath & path, StatusIB & status) : mPath(path), mStatus(status) {}
ConcreteAttributePath mPath;
StatusIB mStatus;
};
/*
* Retrieve the value of an entire cluster instance from the cache (if present) given a path
* and decode it using DataModel::Decode into the in-out argument 'value'. If any StatusIBs
* are present in the cache instead of data, they will be provided in the statusList argument.
*
* For some types of attributes, the value for the attribute is directly backed by the underlying TLV buffer
* and has pointers into that buffer. (e.g octet strings, char strings and lists). This buffer only remains
* valid until the cached value for that path is updated, so it must not be held
* across any async call boundaries.
*
* The template parameter ClusterObjectT is generally expected to be a
* ClusterName::Attributes::DecodableType, but any
* object that can be decoded using the DataModel::Decode machinery will work.
*
* Notable return values:
* - If neither data nor status for the specified path exists in the cache, CHIP_ERROR_KEY_NOT_FOUND
* shall be returned.
*
*/
template <typename ClusterObjectTypeT>
CHIP_ERROR Get(EndpointId endpointId, ClusterId clusterId, ClusterObjectTypeT & value,
std::list<AttributeStatus> & statusList) const
{
statusList.clear();
return ForEachAttribute(endpointId, clusterId, [&value, this, &statusList](const ConcreteAttributePath & path) {
TLV::TLVReader reader;
CHIP_ERROR err;
err = Get(path, reader);
if (err == CHIP_ERROR_IM_STATUS_CODE_RECEIVED)
{
StatusIB status;
ReturnErrorOnFailure(GetStatus(path, status));
statusList.push_back(AttributeStatus(path, status));
err = CHIP_NO_ERROR;
}
else if (err == CHIP_NO_ERROR)
{
ReturnErrorOnFailure(DataModel::Decode(reader, path, value));
}
else
{
return err;
}
return CHIP_NO_ERROR;
});
}
/*
* Retrieve the value of an attribute by updating an in-out TLVReader to be positioned
* right at the attribute value.
*
* The underlying TLV buffer only remains valid until the cached value for that path is updated, so it must
* not be held across any async call boundaries.
*
* Notable return values:
* - If neither data nor status for the specified path exists in the cache, CHIP_ERROR_KEY_NOT_FOUND
* shall be returned.
*
* - If a StatusIB is present in the cache instead of data, a CHIP_ERROR_IM_STATUS_CODE_RECEIVED error
* shall be returned from this call instead. The actual StatusIB can be retrieved using the GetStatus() API above.
*
*/
CHIP_ERROR Get(const ConcreteAttributePath & path, TLV::TLVReader & reader) const;
/*
* Retrieve the data version for the given cluster. If there is no data for the specified path in the cache,
* CHIP_ERROR_KEY_NOT_FOUND shall be returned. Otherwise aVersion will be set to the
* current data version for the cluster (which may have no value if we don't have a known data version
* for it, for example because none of our paths were wildcards that covered the whole cluster).
*/
CHIP_ERROR GetVersion(const ConcreteClusterPath & path, Optional<DataVersion> & aVersion) const;
/*
* Get highest received event number.
*/
virtual CHIP_ERROR GetHighestReceivedEventNumber(Optional<EventNumber> & aEventNumber) final
{
aEventNumber = mHighestReceivedEventNumber;
return CHIP_NO_ERROR;
}
/*
* Retrieve the value of an event from the cache given an EventNumber by decoding
* it using DataModel::Decode into the in-out argument 'value'.
*
* This should be used in conjunction with the ForEachEvent() iterator function to
* retrieve the EventHeader (and corresponding metadata information for the event) along with its EventNumber.
*
* For some types of events, the values for the fields in the event are directly backed by the underlying TLV buffer
* and have pointers into that buffer. (e.g octet strings, char strings and lists). Unlike its attribute counterpart,
* these pointers are stable and will not change until a call to `ClearEventCache` happens.
*
* The template parameter EventObjectTypeT is generally expected to be a
* ClusterName::Events::EventName::DecodableType, but any
* object that can be decoded using the DataModel::Decode machinery will work.
*
* Notable return values:
* - If the provided event object's Cluster and Event IDs don't match those of the event in the cache,
* a CHIP_ERROR_SCHEMA_MISMATCH shall be returned.
*
* - If event doesn't exist in the cache, CHIP_ERROR_KEY_NOT_FOUND
* shall be returned.
*/
template <typename EventObjectTypeT>
CHIP_ERROR Get(EventNumber eventNumber, EventObjectTypeT & value) const
{
TLV::TLVReader reader;
CHIP_ERROR err;
auto * eventData = GetEventData(eventNumber, err);
ReturnErrorOnFailure(err);
if (eventData->first.mPath.mClusterId != value.GetClusterId() || eventData->first.mPath.mEventId != value.GetEventId())
{
return CHIP_ERROR_SCHEMA_MISMATCH;
}
ReturnErrorOnFailure(Get(eventNumber, reader));
return DataModel::Decode(reader, value);
}
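/*
* Example (illustrative; assumes a generated event type such as
* Clusters::BasicInformation::Events::StartUp::DecodableType, and an eventNumber previously discovered
* via ForEachEventData()):
*
*   chip::app::Clusters::BasicInformation::Events::StartUp::DecodableType startUp;
*   CHIP_ERROR err = cache.Get(eventNumber, startUp);
*/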
/*
* Retrieve the data of an event by updating an in-out TLVReader to be positioned
* right at the structure that encapsulates the event payload.
*
* Notable return values:
* - If no event with a matching eventNumber exists in the cache, CHIP_ERROR_KEY_NOT_FOUND
* shall be returned.
*
*/
CHIP_ERROR Get(EventNumber eventNumber, TLV::TLVReader & reader) const;
/*
* Retrieve the StatusIB for a specific event from the event status cache (if one exists).
* Otherwise, a CHIP_ERROR_KEY_NOT_FOUND error will be returned.
*
* This is to be used with the `ForEachEventStatus` iterator function.
*
* NOTE: Receipt of a StatusIB does not affect any pre-existing or future event data entries in the cache (and vice-versa).
*
*/
CHIP_ERROR GetStatus(const ConcreteEventPath & path, StatusIB & status) const;
/*
* Execute an iterator function that is called for every attribute
* in a given endpoint and cluster. The function when invoked is provided a concrete attribute path
* to every attribute that matches in the cache.
*
* The iterator is expected to have this signature:
* CHIP_ERROR IteratorFunc(const ConcreteAttributePath &path);
*
* Notable return values:
* - If a cluster instance corresponding to endpointId and clusterId doesn't exist in the cache,
* CHIP_ERROR_KEY_NOT_FOUND shall be returned.
*
* - If func returns an error, that will result in termination of any further iteration over attributes
* and that error shall be returned back up to the original call to this function.
*
*/
template <typename IteratorFunc>
CHIP_ERROR ForEachAttribute(EndpointId endpointId, ClusterId clusterId, IteratorFunc func) const
{
CHIP_ERROR err;
auto clusterState = GetClusterState(endpointId, clusterId, err);
ReturnErrorOnFailure(err);
for (auto & attributeIter : clusterState->mAttributes)
{
const ConcreteAttributePath path(endpointId, clusterId, attributeIter.first);
ReturnErrorOnFailure(func(path));
}
return CHIP_NO_ERROR;
}
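/*
* Example (illustrative): count the attributes currently cached for a given endpoint/cluster pair.
*
*   size_t attributeCount = 0;
*   CHIP_ERROR err = cache.ForEachAttribute(endpointId, clusterId, [&](const chip::app::ConcreteAttributePath & path) {
*       attributeCount++;
*       return CHIP_NO_ERROR;
*   });
*/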
/*
* Execute an iterator function that is called for every attribute
* for a given cluster across all endpoints in the cache. The function is passed a
* concrete attribute path to every attribute that matches in the cache.
*
* The iterator is expected to have this signature:
* CHIP_ERROR IteratorFunc(const ConcreteAttributePath &path);
*
* Notable return values:
* - If func returns an error, that will result in termination of any further iteration over attributes
* and that error shall be returned back up to the original call to this function.
*
*/
template <typename IteratorFunc>
CHIP_ERROR ForEachAttribute(ClusterId clusterId, IteratorFunc func) const
{
for (auto & endpointIter : mCache)
{
for (auto & clusterIter : endpointIter.second)
{
if (clusterIter.first == clusterId)
{
for (auto & attributeIter : clusterIter.second.mAttributes)
{
const ConcreteAttributePath path(endpointIter.first, clusterId, attributeIter.first);
ReturnErrorOnFailure(func(path));
}
}
}
}
return CHIP_NO_ERROR;
}
/*
* Execute an iterator function that is called for every cluster
* in a given endpoint and passed a ClusterId for every cluster that
* matches.
*
* The iterator is expected to have this signature:
* CHIP_ERROR IteratorFunc(ClusterId clusterId);
*
* Notable return values:
* - If func returns an error, that will result in termination of any further iteration over attributes
* and that error shall be returned back up to the original call to this function.
*
*/
template <typename IteratorFunc>
CHIP_ERROR ForEachCluster(EndpointId endpointId, IteratorFunc func) const
{
auto endpointIter = mCache.find(endpointId);
// Guard against a missing endpoint; dereferencing an end() iterator is undefined behavior.
if (endpointIter != mCache.end())
{
for (auto & clusterIter : endpointIter->second)
{
ReturnErrorOnFailure(func(clusterIter.first));
}
}
return CHIP_NO_ERROR;
}
/*
* Execute an iterator function that is called for every event in the event data cache that satisfies the following
* conditions:
* - It matches the provided path filter
* - Its event number is greater than or equal to the provided minimum event number filter.
*
* Each filter argument can be omitted from the match criteria above by passing in an empty EventPathParams() and/or
* a minimum event filter of 0.
*
* This iterator is called in increasing order from the event with the lowest event number to the highest.
*
* The function is passed a const reference to the EventHeader associated with that event.
*
* The iterator is expected to have this signature:
* CHIP_ERROR IteratorFunc(const EventHeader & eventHeader);
*
* Notable return values:
* - If func returns an error, that will result in termination of any further iteration over events
* and that error shall be returned back up to the original call to this function.
*
*/
template <typename IteratorFunc>
CHIP_ERROR ForEachEventData(IteratorFunc func, EventPathParams pathFilter = EventPathParams(),
EventNumber minEventNumberFilter = 0) const
{
for (const auto & item : mEventDataCache)
{
if (pathFilter.IsEventPathSupersetOf(item.first.mPath) && item.first.mEventNumber >= minEventNumberFilter)
{
ReturnErrorOnFailure(func(item.first));
}
}
return CHIP_NO_ERROR;
}
/*
* Execute an iterator function that is called for every StatusIB in the event status cache.
*
* The iterator is expected to have this signature:
* CHIP_ERROR IteratorFunc(const ConcreteEventPath & eventPath, const StatusIB & statusIB);
*
* Notable return values:
* - If func returns an error, that will result in termination of any further iteration over events
* and that error shall be returned back up to the original call to this function.
*
* NOTE: Receipt of a StatusIB does not affect any pre-existing event data entries in the cache (and vice-versa).
*
*/
template <typename IteratorFunc>
CHIP_ERROR ForEachEventStatus(IteratorFunc func) const
{
for (const auto & item : mEventStatusCache)
{
ReturnErrorOnFailure(func(item.first, item.second));
}
return CHIP_NO_ERROR;
}
/*
* Clear out all the attribute data and DataVersions stored for a given endpoint.
*/
void ClearAttributes(EndpointId endpoint);
/*
* Clear out all the attribute data and the DataVersion stored for a given cluster.
*/
void ClearAttributes(const ConcreteClusterPath & cluster);
/*
* Clear out the data (or size, if not storing data) stored for an
* attribute.
*/
void ClearAttribute(const ConcreteAttributePath & attribute);
/*
* Clear out the event data and status caches.
*
* By default, this will not clear out any internally tracked event counters, specifically:
* - the highest event number seen so far. This is used in reads/subscribe requests to express to the receiving
* server to not send events that the client has already seen so far.
*
* That can be over-ridden by passing in 'true' to `resetTrackedEventCounters`.
*
*/
void ClearEventCache(bool resetTrackedEventCounters = false)
{
mEventDataCache.clear();
if (resetTrackedEventCounters)
{
mHighestReceivedEventNumber.ClearValue();
}
mEventStatusCache.clear();
}
/*
* Get the last concrete report data path. If the last report path is not a valid concrete cluster path,
* CHIP_ERROR_INCORRECT_STATE is returned.
*/
CHIP_ERROR GetLastReportDataPath(ConcreteClusterPath & aPath);
private:
// An attribute state can be one of three things:
// * If we got a path-specific error for the attribute, the corresponding
// status.
// * If we got data for the attribute and we are storing data ourselves, the
// data.
// * If we got data for the attribute and we are not storing data
// ourselves, the size of the data, so we can still prioritize sending
// DataVersions correctly.
//
// The data for a single attribute is not going to be gigabytes in size, so
// using uint32_t for the size is fine; on 64-bit systems this can save
// quite a bit of space.
using AttributeData = Platform::ScopedMemoryBufferWithSize<uint8_t>;
using AttributeState = std::conditional_t<CanEnableDataCaching, Variant<StatusIB, AttributeData, uint32_t>, uint32_t>;
// mPendingDataVersion represents a tentative data version for a cluster that we have gotten some reports for.
//
// mCommittedDataVersion represents a known data version for a cluster. In order for this to have a
// value the cluster must be included in a path in mRequestPathSet that has a wildcard attribute
// and we must not be in the middle of receiving reports for that cluster.
struct ClusterState
{
std::map<AttributeId, AttributeState> mAttributes;
Optional<DataVersion> mPendingDataVersion;
Optional<DataVersion> mCommittedDataVersion;
};
using EndpointState = std::map<ClusterId, ClusterState>;
using NodeState = std::map<EndpointId, EndpointState>;
struct Comparator
{
bool operator()(const AttributePathParams & x, const AttributePathParams & y) const
{
// Order lexicographically by (endpoint, cluster) so that this is a strict weak ordering suitable for std::set.
return (x.mEndpointId < y.mEndpointId) || (x.mEndpointId == y.mEndpointId && x.mClusterId < y.mClusterId);
}
};
using EventData = std::pair<EventHeader, System::PacketBufferHandle>;
//
// This is a custom comparator for use with the std::set<EventData> below. Uniqueness
// is determined solely by the event number associated with each event.
//
struct EventDataCompare
{
bool operator()(const EventData & lhs, const EventData & rhs) const
{
return (lhs.first.mEventNumber < rhs.first.mEventNumber);
}
};
/*
* These functions provide a way to index into the cached state with different sub-sets of a path, returning
* appropriate slices of the data as requested.
*
* In all variants, the respective slice is returned if a valid path is provided. 'err' is updated to reflect
* the status of the operation.
*
* Notable status values:
* - If a cluster instance corresponding to endpointId and clusterId doesn't exist in the cache,
* CHIP_ERROR_KEY_NOT_FOUND shall be returned.
*
*/
const EndpointState * GetEndpointState(EndpointId endpointId, CHIP_ERROR & err) const;
const ClusterState * GetClusterState(EndpointId endpointId, ClusterId clusterId, CHIP_ERROR & err) const;
const AttributeState * GetAttributeState(EndpointId endpointId, ClusterId clusterId, AttributeId attributeId,
CHIP_ERROR & err) const;
const EventData * GetEventData(EventNumber number, CHIP_ERROR & err) const;
/*
* Updates the state of an attribute in the cache given a reader. If the reader is null, the state is updated
* with the provided status.
*/
CHIP_ERROR UpdateCache(const ConcreteDataAttributePath & aPath, TLV::TLVReader * apData, const StatusIB & aStatus);
/*
* If apData is not null, updates the cached event set with the specified event header + payload.
* If apData is null and apStatus is not null, the StatusIB is stored in the event status cache.
*
* Storage of either of these do not affect pre-existing data for the other events in the cache.
*
*/
CHIP_ERROR UpdateEventCache(const EventHeader & aEventHeader, TLV::TLVReader * apData, const StatusIB * apStatus);
//
// ReadClient::Callback
//
void OnReportBegin() override;
void OnReportEnd() override;
void OnAttributeData(const ConcreteDataAttributePath & aPath, TLV::TLVReader * apData, const StatusIB & aStatus) override;
void OnError(CHIP_ERROR aError) override { return mCallback.OnError(aError); }
void OnEventData(const EventHeader & aEventHeader, TLV::TLVReader * apData, const StatusIB * apStatus) override;
void OnDone(ReadClient * apReadClient) override
{
mRequestPathSet.clear();
return mCallback.OnDone(apReadClient);
}
void OnSubscriptionEstablished(SubscriptionId aSubscriptionId) override
{
mCallback.OnSubscriptionEstablished(aSubscriptionId);
}
CHIP_ERROR OnResubscriptionNeeded(ReadClient * apReadClient, CHIP_ERROR aTerminationCause) override
{
return mCallback.OnResubscriptionNeeded(apReadClient, aTerminationCause);
}
void OnDeallocatePaths(chip::app::ReadPrepareParams && aReadPrepareParams) override
{
mCallback.OnDeallocatePaths(std::move(aReadPrepareParams));
}
virtual CHIP_ERROR OnUpdateDataVersionFilterList(DataVersionFilterIBs::Builder & aDataVersionFilterIBsBuilder,
const Span<AttributePathParams> & aAttributePaths,
bool & aEncodedDataVersionList) override;
void OnUnsolicitedMessageFromPublisher(ReadClient * apReadClient) override
{
return mCallback.OnUnsolicitedMessageFromPublisher(apReadClient);
}
void OnCASESessionEstablished(const SessionHandle & aSession, ReadPrepareParams & aSubscriptionParams) override
{
return mCallback.OnCASESessionEstablished(aSession, aSubscriptionParams);
}
// Commit the pending cluster data version, if there is one.
void CommitPendingDataVersion();
// Get our list of data version filters, sorted from largest to smallest by the total size of the TLV
// payload for the filter's cluster. Applying filters in this order should maximize space savings
// on the wire if not all filters can be applied.
void GetSortedFilters(std::vector<std::pair<DataVersionFilter, size_t>> & aVector) const;
CHIP_ERROR GetElementTLVSize(TLV::TLVReader * apData, uint32_t & aSize);
Callback & mCallback;
NodeState mCache;
std::set<ConcreteAttributePath> mChangedAttributeSet;
std::set<AttributePathParams, Comparator> mRequestPathSet; // wildcard attribute request path only
std::vector<EndpointId> mAddedEndpoints;
std::set<EventData, EventDataCompare> mEventDataCache;
Optional<EventNumber> mHighestReceivedEventNumber;
std::map<ConcreteEventPath, StatusIB> mEventStatusCache;
BufferedReadCallback mBufferedReader;
ConcreteClusterPath mLastReportDataPath = ConcreteClusterPath(kInvalidEndpointId, kInvalidClusterId);
const bool mCacheData = CanEnableDataCaching;
};
using ClusterStateCache = ClusterStateCacheT<true>;
using ClusterStateCacheNoData = ClusterStateCacheT<false>;
}; // namespace app
}; // namespace chip
#endif // CHIP_CONFIG_ENABLE_READ_CLIENT

@@ -0,0 +1,55 @@
/*
*
* Copyright (c) 2020-2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/CommandHandler.h>
namespace chip {
namespace app {
void CommandHandler::Handle::Init(CommandHandler * handler)
{
if (handler != nullptr)
{
handler->IncrementHoldOff(this);
mpHandler = handler;
}
}
CommandHandler * CommandHandler::Handle::Get()
{
// Not safe to work with CommandHandlerImpl in parallel with other Matter work.
assertChipStackLockedByCurrentThread();
return mpHandler;
}
void CommandHandler::Handle::Release()
{
if (mpHandler != nullptr)
{
mpHandler->DecrementHoldOff(this);
Invalidate();
}
}
CommandHandler::Handle::Handle(CommandHandler * handler)
{
Init(handler);
}
} // namespace app
} // namespace chip

@@ -0,0 +1,309 @@
/*
*
* Copyright (c) 2020 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/CommandHandlerExchangeInterface.h>
#include <app/ConcreteCommandPath.h>
#include <app/data-model/EncodableToTLV.h>
#include <app/data-model/Encode.h>
#include <lib/core/CHIPCore.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/IntrusiveList.h>
#include <lib/support/logging/CHIPLogging.h>
#include <protocols/interaction_model/StatusCode.h>
namespace chip {
namespace app {
/**
* A handler for incoming Invoke interactions.
*
* Allows adding responses to be sent in an InvokeResponse: see the various
* "Add*" methods.
*
* Allows adding the responses asynchronously when using `CommandHandler::Handle`
* (see documentation for `CommandHandler::Handle` for details)
*
* Upgrading notes: this class has moved to an interface from a previous more complex
* implementation. If upgrading code between versions, please see docs/upgrading.md
*/
class CommandHandler
{
public:
virtual ~CommandHandler() = default;
/**
* Class that allows asynchronous command processing before sending a
* response. When such processing is desired:
*
* 1) Create a Handle initialized with the CommandHandler that delivered the
* incoming command.
* 2) Ensure the Handle, or some Handle it's moved into via the move
* constructor or move assignment operator, remains alive during the
* course of the asynchronous processing.
* 3) Ensure that the ConcreteCommandPath involved will be known when
* sending the response.
* 4) When ready to send the response:
* * Ensure that no other Matter tasks are running in parallel (e.g. by
* running on the Matter event loop or holding the Matter stack lock).
* * Call Get() to get the CommandHandler.
* * Check that Get() did not return null.
* * Add the response to the CommandHandler via one of the Add* methods.
* * Let the Handle get destroyed, or manually call Handle::Release() if
* destruction of the Handle is not desirable for some reason.
*
* The Invoke Response will not be sent until all outstanding Handles have
* been destroyed or have had Release called.
*/
class Handle : public IntrusiveListNodeBase<>
{
public:
Handle() {}
Handle(const Handle & handle) = delete;
Handle(Handle && handle)
{
Init(handle.mpHandler);
handle.Release();
}
Handle(decltype(nullptr)) {}
Handle(CommandHandler * handler);
~Handle() { Release(); }
Handle & operator=(Handle && handle)
{
Release();
Init(handle.mpHandler);
handle.Release();
return *this;
}
Handle & operator=(decltype(nullptr))
{
Release();
return *this;
}
/**
* Get the CommandHandler object it holds. Get() may return a nullptr if the CommandHandler object it holds is no longer
* valid.
*/
CommandHandler * Get();
void Release();
void Invalidate() { mpHandler = nullptr; }
private:
void Init(CommandHandler * handler);
CommandHandler * mpHandler = nullptr;
};
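/*
* Illustrative sketch of the asynchronous flow described above (editorial addition; the scheduling
* mechanism and the names requestPath/asyncHandle are assumptions):
*
*   // During synchronous command processing (steps 1-3): keep the handler alive and remember the path.
*   chip::app::CommandHandler::Handle asyncHandle(commandHandler);
*   chip::app::ConcreteCommandPath requestPath = aRequestCommandPath;
*   // ... start the long-running work, moving asyncHandle into whatever completion context is used ...
*
*   // Later, on the Matter event loop (step 4):
*   chip::app::CommandHandler * handler = asyncHandle.Get();
*   if (handler != nullptr)
*   {
*       handler->AddStatus(requestPath, chip::Protocols::InteractionModel::Status::Success);
*   }
*   asyncHandle.Release(); // or simply let the Handle be destroyed
*/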
/**
* Adds the given command status and returns any failures in adding statuses (e.g. out
* of buffer space) to the caller. `context` is an optional (if not nullptr)
* debug string to include in logging.
*/
virtual CHIP_ERROR FallibleAddStatus(const ConcreteCommandPath & aRequestCommandPath,
const Protocols::InteractionModel::ClusterStatusCode & aStatus,
const char * context = nullptr) = 0;
CHIP_ERROR FallibleAddStatus(const ConcreteCommandPath & aRequestCommandPath, const Protocols::InteractionModel::Status aStatus,
const char * context = nullptr)
{
return FallibleAddStatus(aRequestCommandPath, Protocols::InteractionModel::ClusterStatusCode{ aStatus }, context);
}
/**
* Adds an IM global or Cluster status when the caller is unable to handle any failures. Logging is performed
* and failure to register the status is checked with VerifyOrDie. `context` is an optional (if not nullptr)
* debug string to include in logging.
*/
virtual void AddStatus(const ConcreteCommandPath & aRequestCommandPath,
const Protocols::InteractionModel::ClusterStatusCode & aStatus, const char * context = nullptr) = 0;
void AddStatus(const ConcreteCommandPath & aRequestCommandPath, const Protocols::InteractionModel::Status aStatus,
const char * context = nullptr)
{
AddStatus(aRequestCommandPath, Protocols::InteractionModel::ClusterStatusCode{ aStatus }, context);
}
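/*
* Example (illustrative): use AddStatus when the caller has no way to recover from a failure to record the
* status, and FallibleAddStatus when the caller wants to observe and handle that failure itself.
*
*   commandHandler->AddStatus(aRequestCommandPath, chip::Protocols::InteractionModel::Status::InvalidCommand);
*   CHIP_ERROR err = commandHandler->FallibleAddStatus(aRequestCommandPath, chip::Protocols::InteractionModel::Status::Success);
*/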
/**
* Sets the response to indicate Success with a cluster-specific status code `aClusterStatus` included.
*
* NOTE: For regular success, what you want is AddStatus/FallibleAddStatus(aRequestCommandPath,
* InteractionModel::Status::Success).
*/
virtual CHIP_ERROR AddClusterSpecificSuccess(const ConcreteCommandPath & aRequestCommandPath, ClusterStatus aClusterStatus)
{
return FallibleAddStatus(aRequestCommandPath,
Protocols::InteractionModel::ClusterStatusCode::ClusterSpecificSuccess(aClusterStatus));
}
/**
* Sets the response to indicate Failure with a cluster-specific status code `aClusterStatus` included.
*/
virtual CHIP_ERROR AddClusterSpecificFailure(const ConcreteCommandPath & aRequestCommandPath, ClusterStatus aClusterStatus)
{
return FallibleAddStatus(aRequestCommandPath,
Protocols::InteractionModel::ClusterStatusCode::ClusterSpecificFailure(aClusterStatus));
}
/**
* GetAccessingFabricIndex() may only be called during synchronous command
* processing. Anything that runs async (while holding a
* CommandHandler::Handle or equivalent) must not call this method, because
* it will not work right if the session we're using was evicted.
*/
virtual FabricIndex GetAccessingFabricIndex() const = 0;
/**
* API for adding a data response. The `aEncodable` is generally expected to encode
* a ClusterName::Commands::CommandName::Type struct, however any object should work.
*
* @param [in] aRequestCommandPath the concrete path of the command we are
* responding to.
* @param [in] aResponseCommandId the command whose content is being encoded.
* @param [in] aEncodable - an encodable that places the command data structure
* for `aResponseCommandId` into a TLV Writer.
*
* If you have no great way of handling the returned CHIP_ERROR, consider
* using `AddResponse` which will automatically reply with `Failure` in
* case AddResponseData fails.
*/
virtual CHIP_ERROR AddResponseData(const ConcreteCommandPath & aRequestCommandPath, CommandId aResponseCommandId,
const DataModel::EncodableToTLV & aEncodable) = 0;
/**
* Attempts to encode a response to a command.
*
* `aRequestCommandPath` represents the request path (endpoint/cluster/commandid) and the reply
* will preserve the same path and switch the command id to aResponseCommandId.
*
* As this method does not return any error codes, it must try its best to encode the reply
* and if it fails, it MUST encode a `Protocols::InteractionModel::Status::Failure` as a
* reply (i.e. a reply is guaranteed to be sent).
*
* Above is the main difference from AddResponseData: AddResponse will auto-reply with failure while
* AddResponseData allows the caller to try to deal with any CHIP_ERRORs.
*/
virtual void AddResponse(const ConcreteCommandPath & aRequestCommandPath, CommandId aResponseCommandId,
const DataModel::EncodableToTLV & aEncodable) = 0;
/**
* Check whether the InvokeRequest we are handling is a timed invoke.
*/
virtual bool IsTimedInvoke() const = 0;
/**
* @brief Flush acks right away for a slow command
*
* Some commands that do heavy lifting of storage/crypto should
* ack right away to improve reliability and reduce needless retries. This
* method can be manually called in commands that are especially slow to
* immediately schedule an acknowledgement (if needed) since the delayed
* stand-alone ack timer may actually not hit soon enough due to blocking command
* execution.
*
*/
virtual void FlushAcksRightAwayOnSlowCommand() = 0;
virtual Access::SubjectDescriptor GetSubjectDescriptor() const = 0;
/**
* Gets the inner exchange context object, without ownership.
*
* WARNING: This is dangerous, since it is directly interacting with the
* exchange being managed automatically by mpResponder and
* if not done carefully, may end up with use-after-free errors.
*
* @return The inner exchange context, might be nullptr if no
* exchange context has been assigned or the context
* has been released.
*/
virtual Messaging::ExchangeContext * GetExchangeContext() const = 0;
/**
* API for adding a data response. The template parameter T is generally
* expected to be a ClusterName::Commands::CommandName::Type struct, but any
* object that can be encoded using the DataModel::Encode machinery and
* exposes the right command id will work.
*
* If you have no great way of handling the returned CHIP_ERROR, consider
* using `AddResponse` which will automatically reply with `Failure` in
* case AddResponseData fails.
*
* @param [in] aRequestCommandPath the concrete path of the command we are
* responding to.
*
* The response path will be the same as the request, except the
* reply command ID used will be `CommandData::GetCommandId()`, which is
* assumed to be a static member of the templated type.
*
* @param [in] aData the data for the response. Its type is expected to provide
* a static `GetCommandId()` as well as to encode the correct data
* structure for building a reply.
*/
template <typename CommandData>
CHIP_ERROR AddResponseData(const ConcreteCommandPath & aRequestCommandPath, const CommandData & aData)
{
DataModel::EncodableType<CommandData> encoder(aData);
return AddResponseData(aRequestCommandPath, CommandData::GetCommandId(), encoder);
}
/**
* API for adding a response. This will try to encode a data response (response command), and if that fails
* it will encode a Protocols::InteractionModel::Status::Failure status response instead.
*
* Above is the main difference from AddResponseData: AddResponse will auto-reply with failure while
* AddResponseData allows the caller to try to deal with any CHIP_ERRORs.
*
* The template parameter T is generally expected to be a ClusterName::Commands::CommandName::Type struct, but any object that
* can be encoded using the DataModel::Encode machinery and exposes the right command id will work.
*
* Since the function will call AddStatus when it fails to encode the data, it cannot send any response when it fails to encode
* a status code since another AddStatus call will also fail. The error from AddStatus will just be logged.
*
* @param [in] aRequestCommandPath the concrete path of the command we are
* responding to.
* @param [in] aData the data for the response.
*/
template <typename CommandData>
void AddResponse(const ConcreteCommandPath & aRequestCommandPath, const CommandData & aData)
{
DataModel::EncodableType<CommandData> encodable(aData);
AddResponse(aRequestCommandPath, CommandData::GetCommandId(), encodable);
}
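/**
* Editor's illustrative sketch (not part of the original header): contrasting the two
* templated overloads above. Clusters::Sample::Commands::SampleResponse::Type is a
* hypothetical generated response struct that exposes a static GetCommandId().
*
* @code
* void HandleSample(CommandHandler & handler, const ConcreteCommandPath & path)
* {
*     Clusters::Sample::Commands::SampleResponse::Type response;
*     response.value = 42; // hypothetical field
*
*     // Option 1: handle encoding errors yourself.
*     if (handler.AddResponseData(path, response) != CHIP_NO_ERROR)
*     {
*         handler.AddStatus(path, Protocols::InteractionModel::Status::Failure);
*         return;
*     }
*
*     // Option 2 (instead of the above): let AddResponse reply with Failure on encode errors.
*     // handler.AddResponse(path, response);
* }
* @endcode
*/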
protected:
/**
* IncrementHoldOff will increase the inner refcount of the CommandHandler.
*
* Users should use CommandHandler::Handle to manage the lifespan of the CommandHandler.
* Handles should be released in a reasonable time, and Close() should only be called once the refcount has reached 0.
*/
virtual void IncrementHoldOff(Handle * apHandle) {}
/**
* DecrementHoldOff is used by CommandHandler::Handle to decrease the refcount of the CommandHandler.
* When the refcount reaches 0, the CommandHandler will send the response to the peer and shut down.
*/
virtual void DecrementHoldOff(Handle * apHandle) {}
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,120 @@
/*
* Copyright (c) 2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <access/SubjectDescriptor.h>
#include <lib/core/DataModelTypes.h>
#include <lib/core/GroupId.h>
#include <lib/core/Optional.h>
#include <messaging/ExchangeContext.h>
#include <protocols/interaction_model/StatusCode.h>
#include <system/SystemPacketBuffer.h>
namespace chip {
namespace app {
/**
* Interface for sending InvokeResponseMessage(s).
*
* Provides information about the associated exchange context.
*
* Design Rationale: This interface enhances unit testability and allows applications to
* customize InvokeResponse behavior. For example, a bridge application might locally execute
* a command using cluster APIs without intending to send a response on an exchange.
* These cluster APIs require providing an instance of CommandHandler where a status response
* is added (see https://github.com/project-chip/connectedhomeip/issues/32030).
*/
class CommandHandlerExchangeInterface
{
public:
virtual ~CommandHandlerExchangeInterface() = default;
/**
* Get a non-owning pointer to the exchange context the InvokeRequestMessage was
* delivered on.
*
* @return The exchange context. Might be nullptr if no exchange context has been
* assigned or the context has been released. For example, the exchange
* context might not be assigned in unit tests, or if an application wishes
* to locally execute cluster APIs and still receive response data without
* sending it on an exchange.
*/
virtual Messaging::ExchangeContext * GetExchangeContext() const = 0;
// TODO(#30453): Follow up refactor. It should be possible to remove
// GetSubjectDescriptor and GetAccessingFabricIndex, as CommandHandler can get these
// values from ExchangeContext.
/**
* Gets subject descriptor of the exchange.
*
* WARNING: This method should only be called when the caller is certain the
* session has not been evicted.
*/
virtual Access::SubjectDescriptor GetSubjectDescriptor() const = 0;
/**
* Gets the accessing fabric index of the exchange.
*
* WARNING: This method should only be called when the caller is certain the
* session has not been evicted.
*/
virtual FabricIndex GetAccessingFabricIndex() const = 0;
/**
* If session for the exchange is a group session, returns its group ID. Otherwise,
* returns a null optional.
*/
virtual Optional<GroupId> GetGroupId() const = 0;
/**
* @brief Called to indicate a slow command is being processed.
*
* Enables the exchange to send whatever transport-level acks might be needed without waiting
* for command processing to complete.
*/
virtual void HandlingSlowCommand() = 0;
/**
* @brief Adds a completed InvokeResponseMessage to the queue for sending to requester.
*
* Called by CommandHandler.
*/
virtual void AddInvokeResponseToSend(System::PacketBufferHandle && aPacket) = 0;
/**
* @brief Called to indicate that an InvokeResponse was dropped.
*
* Called by CommandHandler to relay this information to the requester.
*/
virtual void ResponseDropped() = 0;
/**
* @brief Gets the maximum size of a packet buffer to encode a Command
* Response message. This size depends on the underlying session used
* by the exchange.
*
* The size returned here does not include the prepended headers.
*
* Called by CommandHandler when allocating buffer for encoding the Command
* response.
*/
virtual size_t GetCommandResponseMaxBufferSize() = 0;
};
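/**
* Editor's illustrative sketch (not part of the original header): a minimal test double
* of the kind the design rationale above alludes to. All bodies are placeholders chosen
* for unit tests, not a real implementation.
*
* @code
* class FakeCommandResponder : public CommandHandlerExchangeInterface
* {
* public:
*     Messaging::ExchangeContext * GetExchangeContext() const override { return nullptr; }
*     Access::SubjectDescriptor GetSubjectDescriptor() const override { return Access::SubjectDescriptor{}; }
*     FabricIndex GetAccessingFabricIndex() const override { return kUndefinedFabricIndex; }
*     Optional<GroupId> GetGroupId() const override { return NullOptional; }
*     void HandlingSlowCommand() override {}
*     void AddInvokeResponseToSend(System::PacketBufferHandle && aPacket) override
*     {
*         mResponses.AddToEnd(std::move(aPacket)); // capture responses for later inspection
*     }
*     void ResponseDropped() override { mResponseDropped = true; }
*     size_t GetCommandResponseMaxBufferSize() override { return 1024; }
*
*     System::PacketBufferHandle mResponses;
*     bool mResponseDropped = false;
* };
* @endcode
*/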
} // namespace app
} // namespace chip

File diff suppressed because it is too large

View File

@@ -0,0 +1,483 @@
/*
* Copyright (c) 2020-2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/CommandHandler.h>
#include <app/CommandPathRegistry.h>
#include <app/MessageDef/InvokeRequestMessage.h>
#include <app/MessageDef/InvokeResponseMessage.h>
#include <lib/core/TLV.h>
#include <lib/core/TLVDebug.h>
#include <lib/support/BitFlags.h>
#include <lib/support/Scoped.h>
#include <messaging/ExchangeHolder.h>
#include <messaging/Flags.h>
#include <protocols/Protocols.h>
#include <protocols/interaction_model/Constants.h>
#include <protocols/interaction_model/StatusCode.h>
#include <system/SystemPacketBuffer.h>
#include <system/TLVPacketBufferBackingStore.h>
namespace chip {
namespace app {
class CommandHandlerImpl : public CommandHandler
{
public:
class Callback
{
public:
virtual ~Callback() = default;
/*
* Method that signals to a registered callback that this object
* has completed doing useful work and is now safe for release/destruction.
*/
virtual void OnDone(CommandHandlerImpl & apCommandObj) = 0;
/*
* Upon processing of a CommandDataIB, this method is invoked to dispatch the command
* to the right server-side handler provided by the application.
*/
virtual void DispatchCommand(CommandHandlerImpl & apCommandObj, const ConcreteCommandPath & aCommandPath,
TLV::TLVReader & apPayload) = 0;
/*
* Check to see if a command implementation exists for a specific
* concrete command path. If it does, Success will be returned. If
* not, one of UnsupportedEndpoint, UnsupportedCluster, or
* UnsupportedCommand will be returned, depending on how the command
* fails to exist.
*/
virtual Protocols::InteractionModel::Status CommandExists(const ConcreteCommandPath & aCommandPath) = 0;
};
struct InvokeResponseParameters
{
InvokeResponseParameters(const ConcreteCommandPath & aRequestCommandPath) : mRequestCommandPath(aRequestCommandPath) {}
InvokeResponseParameters & SetStartOrEndDataStruct(bool aStartOrEndDataStruct)
{
mStartOrEndDataStruct = aStartOrEndDataStruct;
return *this;
}
ConcreteCommandPath mRequestCommandPath;
/**
* Whether the method this is being provided to should start/end the TLV container for the CommandFields element
* within CommandDataIB.
*/
bool mStartOrEndDataStruct = true;
};
struct TestOnlyOverrides
{
public:
CommandPathRegistry * commandPathRegistry = nullptr;
CommandHandlerExchangeInterface * commandResponder = nullptr;
};
/*
* The callback passed in has to outlive this CommandHandler object.
*/
CommandHandlerImpl(Callback * apCallback);
/*
* The destructor will also invalidate all Handles created for this CommandHandlerImpl.
*/
virtual ~CommandHandlerImpl();
/*
* Constructor that allows overriding the command path registry (and thus the number of supported paths per invoke) and the command responder.
*
* The callback and any pointers passed via TestOnlyOverrides must outlive this
* CommandHandlerImpl object.
*
* For testing purposes.
*/
CommandHandlerImpl(TestOnlyOverrides & aTestOverride, Callback * apCallback);
/**************** CommandHandler interface implementation ***********************/
using CommandHandler::AddResponseData;
using CommandHandler::AddStatus;
using CommandHandler::FallibleAddStatus;
void FlushAcksRightAwayOnSlowCommand() override;
CHIP_ERROR FallibleAddStatus(const ConcreteCommandPath & aRequestCommandPath,
const Protocols::InteractionModel::ClusterStatusCode & aStatus,
const char * context = nullptr) override;
void AddStatus(const ConcreteCommandPath & aCommandPath, const Protocols::InteractionModel::ClusterStatusCode & aStatus,
const char * context = nullptr) override;
CHIP_ERROR AddResponseData(const ConcreteCommandPath & aRequestCommandPath, CommandId aResponseCommandId,
const DataModel::EncodableToTLV & aEncodable) override;
void AddResponse(const ConcreteCommandPath & aRequestCommandPath, CommandId aResponseCommandId,
const DataModel::EncodableToTLV & aEncodable) override;
Access::SubjectDescriptor GetSubjectDescriptor() const override;
FabricIndex GetAccessingFabricIndex() const override;
bool IsTimedInvoke() const override;
Messaging::ExchangeContext * GetExchangeContext() const override;
/**************** Implementation-specific logic ***********************/
/*
* Main entrypoint for this class to handle an InvokeRequestMessage.
*
* This function MAY call the registered OnDone callback before returning.
* To prevent immediate OnDone invocation, callers can wrap their CommandHandlerImpl instance
* within a CommandHandler::Handle.
*
* isTimedInvoke is true if and only if this is part of a Timed Invoke
* transaction (i.e. was preceded by a Timed Request). If we reach here,
* the timer verification has already been done.
*
* commandResponder handles sending InvokeResponses, added by clusters, to the client. The
* command responder object must outlive this CommandHandler object. It is only safe to
* release after the caller of OnInvokeCommandRequest receives the OnDone callback.
*/
Protocols::InteractionModel::Status OnInvokeCommandRequest(CommandHandlerExchangeInterface & commandResponder,
System::PacketBufferHandle && payload, bool isTimedInvoke);
/**
* Checks that all CommandDataIB within InvokeRequests satisfy the spec's general
* constraints for CommandDataIB. Additionally checks that InvokeRequestMessage is
* properly formatted.
*
* This also builds a registry to ensure that all commands can be responded
* to with the data required as per spec.
*/
CHIP_ERROR ValidateInvokeRequestMessageAndBuildRegistry(InvokeRequestMessage::Parser & invokeRequestMessage);
/**
* This adds a new CommandDataIB element into InvokeResponses for the associated
* aRequestCommandPath. This adds up until the `CommandFields` element within
* `CommandDataIB`.
*
* This call will fail if CommandHandler is already in the middle of building a
* CommandStatusIB or CommandDataIB (i.e. something has called Prepare*, without
* calling Finish*), or is already sending InvokeResponseMessage.
*
* Upon success, the caller is expected to call `FinishCommand` once they have added
* all the fields into the CommandFields element of CommandDataIB.
*
* @param [in] aResponseCommandPath the concrete response path that we are sending to Requester.
* @param [in] aPrepareParameters struct containing the parameters needed for preparing a command,
* such as the request path and whether this method should start the CommandFields element within
* CommandDataIB.
*/
CHIP_ERROR PrepareInvokeResponseCommand(const ConcreteCommandPath & aResponseCommandPath,
const InvokeResponseParameters & aPrepareParameters);
/**
* Finishes the CommandDataIB element within the InvokeResponses.
*
* Caller must have first successfully called `PrepareInvokeResponseCommand`.
*
* @param [in] aEndDataStruct end the TLV container for the CommandFields element within
* CommandDataIB. This should match the boolean passed into Prepare*.
*
* @return CHIP_ERROR_INCORRECT_STATE
* If device has not previously successfully called
* `PrepareInvokeResponseCommand`.
* @return CHIP_ERROR_BUFFER_TOO_SMALL
* If writing the values needed to finish the InvokeResponseIB
* with the current contents of the InvokeResponseMessage
* would exceed the limit. When this error occurs, it is possible
* we have already closed some of the IB Builders that were
* previously started in `PrepareInvokeResponseCommand`.
* @return CHIP_ERROR_NO_MEMORY
* If the TLVWriter failed to allocate an output buffer due to
* lack of memory.
* @return other Other TLVWriter related errors. Typically occurs if
* `GetCommandDataIBTLVWriter()` was called and used incorrectly.
*/
// TODO(#30453): We should be able to eliminate the chances of OOM issues with reserve.
// This will be completed in a follow up PR.
CHIP_ERROR FinishCommand(bool aEndDataStruct = true);
TLV::TLVWriter * GetCommandDataIBTLVWriter();
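/**
* Editor's illustrative sketch (not part of the original header): the low-level encode
* flow supported by PrepareInvokeResponseCommand / GetCommandDataIBTLVWriter /
* FinishCommand. The TLV tag and value written below are hypothetical.
*
* @code
* CHIP_ERROR EncodeRawResponse(CommandHandlerImpl & handler, const ConcreteCommandPath & requestPath,
*                              const ConcreteCommandPath & responsePath)
* {
*     CommandHandlerImpl::InvokeResponseParameters params(requestPath);
*     ReturnErrorOnFailure(handler.PrepareInvokeResponseCommand(responsePath, params));
*     TLV::TLVWriter * writer = handler.GetCommandDataIBTLVWriter();
*     VerifyOrReturnError(writer != nullptr, CHIP_ERROR_INCORRECT_STATE);
*     ReturnErrorOnFailure(writer->Put(TLV::ContextTag(1), static_cast<uint8_t>(0))); // hypothetical field
*     return handler.FinishCommand();
* }
* @endcode
*/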
#if CHIP_WITH_NLFAULTINJECTION
enum class NlFaultInjectionType : uint8_t
{
SeparateResponseMessages,
SeparateResponseMessagesAndInvertedResponseOrder,
SkipSecondResponse
};
/**
* @brief Sends InvokeResponseMessages with injected faults for certification testing.
*
* The Test Harness (TH) uses this to simulate various server response behaviors,
* ensuring the Device Under Test (DUT) handles responses per specification.
*
* This function strictly validates the DUT's InvokeRequestMessage against the test plan.
* If deviations occur, the TH terminates with a detailed error message.
*
* @param commandResponder commandResponder that will send the InvokeResponseMessages to the client.
* @param payload Payload of the incoming InvokeRequestMessage from the client.
* @param isTimedInvoke Indicates whether the interaction is timed.
* @param faultType The specific type of fault to inject into the response.
*/
// TODO(#30453): After refactoring CommandHandler for better unit testability, create a
// unit test specifically for the fault injection behavior.
void TestOnlyInvokeCommandRequestWithFaultsInjected(CommandHandlerExchangeInterface & commandResponder,
System::PacketBufferHandle && payload, bool isTimedInvoke,
NlFaultInjectionType faultType);
#endif // CHIP_WITH_NLFAULTINJECTION
protected:
// Lifetime management for CommandHandler::Handle
void IncrementHoldOff(Handle * apHandle) override;
void DecrementHoldOff(Handle * apHandle) override;
private:
friend class TestCommandInteraction;
friend class CommandHandler::Handle;
enum class State : uint8_t
{
Idle, ///< Default state that the object starts out in, where no work has commenced
NewResponseMessage, ///< mInvokeResponseBuilder is ready, with no responses added.
Preparing, ///< We are preparing the command or status header.
AddingCommand, ///< In the process of adding a command.
AddedCommand, ///< A command has been completely encoded and is awaiting transmission.
DispatchResponses, ///< The command response(s) are being dispatched.
AwaitingDestruction, ///< The object has completed its work and is awaiting destruction by the application.
};
/**
* @brief Best effort to add InvokeResponse to InvokeResponseMessage.
*
* Tries to add response using lambda. Upon failure to add response, attempts
* to rollback the InvokeResponseMessage to a known good state. If failure is due
* to insufficient space in the current InvokeResponseMessage:
* - Finalizes the current InvokeResponseMessage.
* - Allocates a new InvokeResponseMessage.
* - Reattempts to add the InvokeResponse to the new InvokeResponseMessage.
*
* @param [in] addResponseFunction A lambda function responsible for adding the
* response to the current InvokeResponseMessage.
*/
template <typename Function>
CHIP_ERROR TryAddingResponse(Function && addResponseFunction)
{
// Invalidate any existing rollback backups. The addResponseFunction is
// expected to create a new backup during either PrepareInvokeResponseCommand
// or PrepareStatus execution. Direct invocation of
// CreateBackupForResponseRollback is avoided since the buffer used by
// InvokeResponseMessage might not be allocated until a Prepare* function
// is called.
mRollbackBackupValid = false;
CHIP_ERROR err = addResponseFunction();
if (err == CHIP_NO_ERROR)
{
return CHIP_NO_ERROR;
}
// If RollbackResponse fails, its error value is not important; we prioritize
// conveying the error generated by addResponseFunction to the caller.
if (RollbackResponse() != CHIP_NO_ERROR)
{
return err;
}
// If we failed to add a command due to lack of space in the
// packet, we will make another attempt to add the response using
// an additional InvokeResponseMessage.
if (mState != State::AddedCommand || err != CHIP_ERROR_NO_MEMORY)
{
return err;
}
ReturnErrorOnFailure(FinalizeInvokeResponseMessageAndPrepareNext());
err = addResponseFunction();
if (err != CHIP_NO_ERROR)
{
// The return value of RollbackResponse is ignored, as we prioritize
// conveying the error generated by addResponseFunction to the
// caller.
RollbackResponse();
}
return err;
}
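/**
* Editor's illustrative note (not part of the original header): within this class a
* status is added roughly as below; the lambda performs the Prepare/encode/Finish work
* while TryAddingResponse handles rollback and, on CHIP_ERROR_NO_MEMORY, retries in a
* freshly finalized/allocated InvokeResponseMessage.
*
* @code
* CHIP_ERROR err = TryAddingResponse([&]() -> CHIP_ERROR { return TryAddStatusInternal(aCommandPath, aStatus); });
* @endcode
*/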
void MoveToState(const State aTargetState);
const char * GetStateStr() const;
/**
* Create a backup to enable rolling back to the state prior to ResponseData encoding in the event of failure.
*/
void CreateBackupForResponseRollback();
/**
* Rollback the state to before encoding the current ResponseData (before calling PrepareInvokeResponseCommand / PrepareStatus)
*
* Requires CreateBackupForResponseRollback to be called at the start of PrepareInvokeResponseCommand / PrepareStatus
*/
CHIP_ERROR RollbackResponse();
/*
* This forcibly closes the exchange context if a valid one is pointed to. Such a situation does
* not arise during normal message processing flows that all normally call Close() above. This can only
* arise due to application-initiated destruction of the object when this object is handling receiving/sending
* message payloads.
*/
void Abort();
/*
* Allocates a packet buffer used for encoding an invoke response payload.
*
* This can be called multiple times safely, as it will only allocate the buffer once for the lifetime
* of this object.
*/
CHIP_ERROR AllocateBuffer();
/**
* This will add a new CommandStatusIB element into InvokeResponses. It will put the
* aCommandPath into the CommandPath element within CommandStatusIB.
*
* This call will fail if CommandHandler is already in the middle of building a
* CommandStatusIB or CommandDataIB (i.e. something has called Prepare*, without
* calling Finish*), or is already sending InvokeResponseMessage.
*
* Upon success, the caller is expected to call `FinishStatus` once they have encoded
* StatusIB.
*
* @param [in] aCommandPath the concrete path of the command we are responding to.
*/
CHIP_ERROR PrepareStatus(const ConcreteCommandPath & aCommandPath);
/**
* Finishes the CommandStatusIB element within the InvokeResponses.
*
* Caller must have first successfully called `PrepareStatus`.
*/
CHIP_ERROR FinishStatus();
CHIP_ERROR PrepareInvokeResponseCommand(const CommandPathRegistryEntry & apCommandPathRegistryEntry,
const ConcreteCommandPath & aCommandPath, bool aStartDataStruct);
CHIP_ERROR FinalizeLastInvokeResponseMessage() { return FinalizeInvokeResponseMessage(/* aHasMoreChunks = */ false); }
CHIP_ERROR FinalizeInvokeResponseMessageAndPrepareNext();
CHIP_ERROR FinalizeInvokeResponseMessage(bool aHasMoreChunks);
Protocols::InteractionModel::Status ProcessInvokeRequest(System::PacketBufferHandle && payload, bool isTimedInvoke);
/**
* Called internally to signal the completion of all work on this object, gracefully close the
* exchange (by calling into the base class) and finally, signal to a registered callback that it's
* safe to release this object.
*/
void Close();
/**
* ProcessCommandDataIB is only called when a unicast invoke command request is received.
* It requires the endpointId in its command path to be able to dispatch the command.
*/
Protocols::InteractionModel::Status ProcessCommandDataIB(CommandDataIB::Parser & aCommandElement);
/**
* ProcessGroupCommandDataIB is only called when a group invoke command request is received.
* It doesn't need the endpointId in its command path since it uses the GroupId in the message metadata to find it.
*/
Protocols::InteractionModel::Status ProcessGroupCommandDataIB(CommandDataIB::Parser & aCommandElement);
CHIP_ERROR TryAddStatusInternal(const ConcreteCommandPath & aCommandPath, const StatusIB & aStatus);
CHIP_ERROR AddStatusInternal(const ConcreteCommandPath & aCommandPath, const StatusIB & aStatus);
/**
* If this function fails, it may leave our TLV buffer in an inconsistent state.
* Callers should snapshot as needed before calling this function, and roll back
* as needed afterward.
*
* @param [in] aRequestCommandPath the concrete path of the command we are responding to
* @param [in] aResponseCommandId the id of the command to encode
* @param [in] aEncodable the data to encode for the given aResponseCommandId
*/
CHIP_ERROR TryAddResponseData(const ConcreteCommandPath & aRequestCommandPath, CommandId aResponseCommandId,
const DataModel::EncodableToTLV & aEncodable);
void SetExchangeInterface(CommandHandlerExchangeInterface * commandResponder);
/**
* Check whether the InvokeRequest we are handling is targeted to a group.
*/
bool IsGroupRequest() { return mGroupRequest; }
bool ResponsesAccepted() { return !(mGroupRequest || mpResponder == nullptr); }
/**
* Sets the state flag recording that the request we are handling is targeted to a group.
*/
void SetGroupRequest(bool isGroupRequest) { mGroupRequest = isGroupRequest; }
CommandPathRegistry & GetCommandPathRegistry() const { return *mCommandPathRegistry; }
size_t MaxPathsPerInvoke() const { return mMaxPathsPerInvoke; }
void AddToHandleList(Handle * handle);
void RemoveFromHandleList(Handle * handle);
void InvalidateHandles();
bool TestOnlyIsInIdleState() const { return mState == State::Idle; }
Callback * mpCallback = nullptr;
InvokeResponseMessage::Builder mInvokeResponseBuilder;
TLV::TLVType mDataElementContainerType = TLV::kTLVType_NotSpecified;
size_t mPendingWork = 0;
/* List to store all currently-outstanding Handles for this Command Handler.*/
IntrusiveList<Handle> mpHandleList;
chip::System::PacketBufferTLVWriter mCommandMessageWriter;
TLV::TLVWriter mBackupWriter;
size_t mMaxPathsPerInvoke = CHIP_CONFIG_MAX_PATHS_PER_INVOKE;
// TODO(#30453): See if we can reduce this size for the default cases
// TODO Allow flexibility in registration.
BasicCommandPathRegistry<CHIP_CONFIG_MAX_PATHS_PER_INVOKE> mBasicCommandPathRegistry;
CommandPathRegistry * mCommandPathRegistry = &mBasicCommandPathRegistry;
std::optional<uint16_t> mRefForResponse;
CommandHandlerExchangeInterface * mpResponder = nullptr;
State mState = State::Idle;
State mBackupState;
ScopedChangeOnly<bool> mInternalCallToAddResponseData{ false };
bool mSuppressResponse = false;
bool mTimedRequest = false;
bool mGroupRequest = false;
bool mBufferAllocated = false;
bool mReserveSpaceForMoreChunkMessages = false;
// TODO(#32486): We should introduce breaking change where calls to add CommandData
// need to use AddResponse, and not CommandHandler primitives directly using
// GetCommandDataIBTLVWriter.
bool mRollbackBackupValid = false;
// If mGoneAsync is true, we have finished our initial processing of the
// incoming invoke. After this point, our session could go away at any
// time.
bool mGoneAsync = false;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,236 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/CommandHandler.h>
#include <app/ConcreteClusterPath.h>
#include <app/ConcreteCommandPath.h>
#include <app/data-model/Decode.h>
#include <app/data-model/List.h> // So we can encode lists
#include <lib/core/DataModelTypes.h>
#include <lib/support/Iterators.h>
namespace chip {
namespace app {
/*
* This interface permits applications to register a server-side command handler
* at run-time for a given cluster. The handler can either be configured to handle all endpoints
* for the given cluster or only handle a specific endpoint.
*
* If a command is not handled through this interface, it will default to invoking the generated DispatchSingleClusterCommand
* instead.
*
*/
class CommandHandlerInterface
{
public:
struct HandlerContext
{
public:
HandlerContext(CommandHandler & commandHandler, const ConcreteCommandPath & requestPath, TLV::TLVReader & aReader) :
mCommandHandler(commandHandler), mRequestPath(requestPath), mPayload(aReader)
{}
void SetCommandHandled() { mCommandHandled = true; }
void SetCommandNotHandled() { mCommandHandled = false; }
/*
* Returns a TLVReader positioned at the TLV struct that contains the payload of the command.
*
* If the reader is requested from the context, then we can assume there is an intention
* to access the payload of this command and consequently, to handle this command.
*
* If this is not true, the application should call SetCommandNotHandled().
*
*/
TLV::TLVReader & GetReader()
{
SetCommandHandled();
return mPayload;
}
CommandHandler & mCommandHandler;
const ConcreteCommandPath & mRequestPath;
TLV::TLVReader & mPayload;
bool mCommandHandled = false;
};
/**
* aEndpointId can be Missing to indicate that this object is meant to be
* used with all endpoints.
*/
CommandHandlerInterface(Optional<EndpointId> aEndpointId, ClusterId aClusterId) :
mEndpointId(aEndpointId), mClusterId(aClusterId)
{}
virtual ~CommandHandlerInterface() {}
/**
* Callback that must be implemented to handle an invoke request.
*
* The callee is required to handle *all* errors that may occur during the handling of this command,
* including errors like those encountered during decode and encode of the payloads as
* well as application-specific errors. As part of handling the error, the callee is required
* to handle generation of an appropriate status response.
*
* The only exception to this rule is if the HandleCommand helper method below is used - it
* will handle some of these cases automatically (see below).
*
* @param [in] handlerContext Context that encapsulates the current invoke request.
* Handlers are responsible for correctly calling SetCommandHandled()
* on the context if they did handle the command.
*
* This is not necessary if the HandleCommand() method below is invoked.
*/
virtual void InvokeCommand(HandlerContext & handlerContext) = 0;
typedef Loop (*CommandIdCallback)(CommandId id, void * context);
/**
* Function that may be implemented to enumerate accepted (client-to-server)
* commands for the given cluster.
*
* If this function returns CHIP_ERROR_NOT_IMPLEMENTED, the list of accepted
* commands will come from the endpoint metadata for the cluster.
*
* If this function returns any other error, that will be treated as an
* error condition by the caller, and handling will depend on the caller.
*
* Otherwise the list of accepted commands will be the list of values passed
* to the provided callback.
*
* The implementation _must_ pass the provided context to the callback.
*
* If the callback returns Loop::Break, there must be no more calls to it.
* This is used by callbacks that just look for a particular value in the
* list.
*/
virtual CHIP_ERROR EnumerateAcceptedCommands(const ConcreteClusterPath & cluster, CommandIdCallback callback, void * context)
{
return CHIP_ERROR_NOT_IMPLEMENTED;
}
/**
* Function that may be implemented to enumerate generated (response)
* commands for the given cluster.
*
* If this function returns CHIP_ERROR_NOT_IMPLEMENTED, the list of
* generated commands will come from the endpoint metadata for the cluster.
*
* If this function returns any other error, that will be treated as an
* error condition by the caller, and handling will depend on the caller.
*
* Otherwise the list of generated commands will be the list of values
* passed to the provided callback.
*
* The implementation _must_ pass the provided context to the callback.
*
* If the callback returns Loop::Break, there must be no more calls to it.
* This is used by callbacks that just look for a particular value in the
* list.
*/
virtual CHIP_ERROR EnumerateGeneratedCommands(const ConcreteClusterPath & cluster, CommandIdCallback callback, void * context)
{
return CHIP_ERROR_NOT_IMPLEMENTED;
}
/**
* Mechanism for keeping track of a chain of CommandHandlerInterface.
*/
void SetNext(CommandHandlerInterface * aNext) { mNext = aNext; }
CommandHandlerInterface * GetNext() const { return mNext; }
/**
* Check whether this CommandHandlerInterface is relevant for a
* particular endpoint+cluster. A CommandHandlerInterface will be used
* for an invoke on a particular endpoint+cluster only when this function returns
* true.
*/
bool Matches(EndpointId aEndpointId, ClusterId aClusterId) const
{
return (!mEndpointId.HasValue() || mEndpointId.Value() == aEndpointId) && mClusterId == aClusterId;
}
/**
* Check whether a CommandHandlerInterface is relevant for a particular
* specific endpoint. This is used to clean up overrides registered for an
* endpoint that becomes disabled.
*/
bool MatchesEndpoint(EndpointId aEndpointId) const { return mEndpointId.HasValue() && mEndpointId.Value() == aEndpointId; }
/**
* Check whether another CommandHandlerInterface wants to handle the same set of
* commands as we do.
*/
bool Matches(const CommandHandlerInterface & aOther) const
{
return mClusterId == aOther.mClusterId &&
(!mEndpointId.HasValue() || !aOther.mEndpointId.HasValue() || mEndpointId.Value() == aOther.mEndpointId.Value());
}
protected:
/*
* Helper function to automatically de-serialize the data payload into a cluster object
* of type RequestT if the Cluster ID and Command ID in the context match. Upon successful
* de-serialization, the provided function is invoked and passed in a reference to the cluster object.
*
* Any errors encountered in this function prior to calling func result in the automatic generation of a status response.
* If `func` is called, responsibility for generating status responses shifts to the callee, which must handle any further errors that are encountered.
*
* The provided function is expected to have the following signature:
* void Func(HandlerContext &handlerContext, const RequestT &requestPayload);
*/
template <typename RequestT, typename FuncT>
void HandleCommand(HandlerContext & handlerContext, FuncT func)
{
if (!handlerContext.mCommandHandled && (handlerContext.mRequestPath.mClusterId == RequestT::GetClusterId()) &&
(handlerContext.mRequestPath.mCommandId == RequestT::GetCommandId()))
{
RequestT requestPayload;
//
// If the command matches what the caller is looking for, let's mark this as being handled
// even if errors happen after this. This ensures that we don't execute any fall-back strategies
// to handle this command since at this point, the caller is taking responsibility for handling
// the command in its entirety, warts and all.
//
handlerContext.SetCommandHandled();
if (DataModel::Decode(handlerContext.mPayload, requestPayload) != CHIP_NO_ERROR)
{
handlerContext.mCommandHandler.AddStatus(handlerContext.mRequestPath,
Protocols::InteractionModel::Status::InvalidCommand);
return;
}
func(handlerContext, requestPayload);
}
}
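/*
* Editor's illustrative sketch (not part of the original header): a hypothetical handler
* that routes one command through HandleCommand. The cluster ID and the request type
* Clusters::Sample::Commands::Ping::DecodableType are assumptions, not generated code.
*
* @code
* class SampleClusterHandler : public CommandHandlerInterface
* {
* public:
*     SampleClusterHandler() : CommandHandlerInterface(NullOptional, kSampleClusterId) {}
*
*     void InvokeCommand(HandlerContext & ctx) override
*     {
*         HandleCommand<Clusters::Sample::Commands::Ping::DecodableType>(
*             ctx, [](HandlerContext & innerCtx, const auto & req) {
*                 innerCtx.mCommandHandler.AddStatus(innerCtx.mRequestPath,
*                                                    Protocols::InteractionModel::Status::Success);
*             });
*     }
*
* private:
*     static constexpr ClusterId kSampleClusterId = 0xFFF1FC01; // hypothetical vendor cluster ID
* };
* @endcode
*/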
Optional<EndpointId> GetEndpointId() { return mEndpointId; }
private:
Optional<EndpointId> mEndpointId;
ClusterId mClusterId;
CommandHandlerInterface * mNext = nullptr;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,141 @@
/**
* Copyright (c) 2024 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/CommandHandlerInterfaceRegistry.h>
using namespace chip::app;
namespace chip {
namespace app {
CommandHandlerInterfaceRegistry & CommandHandlerInterfaceRegistry::Instance()
{
static CommandHandlerInterfaceRegistry registry;
return registry;
}
void CommandHandlerInterfaceRegistry::UnregisterAllHandlers()
{
CommandHandlerInterface * handlerIter = mCommandHandlerList;
//
// Walk our list of command handlers and de-register them, before finally
// nulling out the list entirely.
//
while (handlerIter)
{
CommandHandlerInterface * nextHandler = handlerIter->GetNext();
handlerIter->SetNext(nullptr);
handlerIter = nextHandler;
}
mCommandHandlerList = nullptr;
}
CHIP_ERROR CommandHandlerInterfaceRegistry::RegisterCommandHandler(CommandHandlerInterface * handler)
{
VerifyOrReturnError(handler != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
for (auto * cur = mCommandHandlerList; cur; cur = cur->GetNext())
{
if (cur->Matches(*handler))
{
ChipLogError(InteractionModel, "Duplicate command handler registration failed");
return CHIP_ERROR_INCORRECT_STATE;
}
}
handler->SetNext(mCommandHandlerList);
mCommandHandlerList = handler;
return CHIP_NO_ERROR;
}
void CommandHandlerInterfaceRegistry::UnregisterAllCommandHandlersForEndpoint(EndpointId endpointId)
{
CommandHandlerInterface * prev = nullptr;
for (auto * cur = mCommandHandlerList; cur;)
{
// Fetch next node in the list before we remove this one.
auto * next = cur->GetNext();
if (cur->MatchesEndpoint(endpointId))
{
if (prev == nullptr)
{
mCommandHandlerList = cur->GetNext();
}
else
{
prev->SetNext(cur->GetNext());
}
cur->SetNext(nullptr);
}
else
{
prev = cur;
}
cur = next;
}
}
CHIP_ERROR CommandHandlerInterfaceRegistry::UnregisterCommandHandler(CommandHandlerInterface * handler)
{
VerifyOrReturnError(handler != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
CommandHandlerInterface * prev = nullptr;
for (auto * cur = mCommandHandlerList; cur; cur = cur->GetNext())
{
if (cur->Matches(*handler))
{
if (prev == nullptr)
{
mCommandHandlerList = cur->GetNext();
}
else
{
prev->SetNext(cur->GetNext());
}
cur->SetNext(nullptr);
return CHIP_NO_ERROR;
}
prev = cur;
}
return CHIP_ERROR_KEY_NOT_FOUND;
}
CommandHandlerInterface * CommandHandlerInterfaceRegistry::GetCommandHandler(EndpointId endpointId, ClusterId clusterId)
{
for (auto * cur = mCommandHandlerList; cur; cur = cur->GetNext())
{
if (cur->Matches(endpointId, clusterId))
{
return cur;
}
}
return nullptr;
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,60 @@
/**
* Copyright (c) 2024 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/CommandHandlerInterface.h>
namespace chip {
namespace app {
/// Keeps track of a list of registered command handler interfaces
///
/// NOTE: command handler interface objects are IntrusiveList elements (i.e.
/// their pointers are contained within). As a result, a command handler
/// may only ever be part of a single registry.
class CommandHandlerInterfaceRegistry
{
public:
/// Remove the entire linked list of handlers
void UnregisterAllHandlers();
/// Add a new handler to the list of registered command handlers
///
/// At most one command handler can exist for a given endpoint/cluster combination. Trying
/// to register conflicting handlers will result in a `CHIP_ERROR_INCORRECT_STATE` error.
CHIP_ERROR RegisterCommandHandler(CommandHandlerInterface * handler);
/// Unregister all commandHandlers that `MatchesEndpoint` for the given endpointId.
void UnregisterAllCommandHandlersForEndpoint(EndpointId endpointId);
/// Unregister a single handler.
///
/// If the handler is not registered, a `CHIP_ERROR_KEY_NOT_FOUND` is returned.
CHIP_ERROR UnregisterCommandHandler(CommandHandlerInterface * handler);
/// Find the command handler for the given endpoint/cluster combination or return
/// nullptr if no such command handler exists.
CommandHandlerInterface * GetCommandHandler(EndpointId endpointId, ClusterId clusterId);
/// A global instance of a command handler registry
static CommandHandlerInterfaceRegistry & Instance();
private:
CommandHandlerInterface * mCommandHandlerList = nullptr;
};
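/// Editor's illustrative sketch (not part of the original header): typical registration
/// against the global instance, assuming a CommandHandlerInterface subclass named
/// SampleClusterHandler whose instance outlives its registration.
///
/// @code
/// SampleClusterHandler gSampleHandler;
///
/// CHIP_ERROR InitSampleCluster()
/// {
///     return CommandHandlerInterfaceRegistry::Instance().RegisterCommandHandler(&gSampleHandler);
/// }
/// @endcode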
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,82 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/util/basic-types.h>
#include <lib/core/GroupId.h>
#include <lib/support/BitFlags.h>
namespace chip {
namespace app {
enum class CommandPathFlags : uint8_t
{
kEndpointIdValid = 0x01,
kGroupIdValid = 0x02,
};
struct CommandPathParams
{
CommandPathParams(EndpointId aEndpointId, GroupId aGroupId, ClusterId aClusterId, CommandId aCommandId,
const BitFlags<CommandPathFlags> & aFlags) :
mEndpointId(aEndpointId),
mGroupId(aGroupId), mClusterId(aClusterId), mCommandId(aCommandId), mFlags(aFlags)
{}
CommandPathParams(uint16_t aId, ClusterId aClusterId, CommandId aCommandId, const BitFlags<CommandPathFlags> & aFlags) :
mClusterId(aClusterId), mCommandId(aCommandId), mFlags(aFlags)
{
if (aFlags == CommandPathFlags::kEndpointIdValid)
{
mEndpointId = aId;
}
else if (aFlags == CommandPathFlags::kGroupIdValid)
{
mGroupId = aId;
}
}
bool IsSamePath(const CommandPathParams & other) const
{
if (other.mClusterId != mClusterId || other.mCommandId != mCommandId)
{
return false;
}
if (mFlags != other.mFlags)
{
return false;
}
if (mFlags == CommandPathFlags::kEndpointIdValid && other.mEndpointId != mEndpointId)
{
return false;
}
if (mFlags == CommandPathFlags::kGroupIdValid && other.mGroupId != mGroupId)
{
return false;
}
return true;
}
EndpointId mEndpointId = 0;
GroupId mGroupId = 0;
ClusterId mClusterId = 0;
CommandId mCommandId = 0;
BitFlags<CommandPathFlags> mFlags;
};
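/*
* Editor's illustrative sketch (not part of the original header): constructing unicast
* and group-addressed paths with the second constructor above (the IDs shown are the
* On/Off cluster 0x0006 and its On command 0x01; the group ID is a placeholder).
*
* @code
* CommandPathParams unicastPath(1, 0x0006, 0x01, BitFlags<CommandPathFlags>(CommandPathFlags::kEndpointIdValid));
* CommandPathParams groupPath(0x0102, 0x0006, 0x01, BitFlags<CommandPathFlags>(CommandPathFlags::kGroupIdValid));
* @endcode
*/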
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,120 @@
/*
*
* Copyright (c) 2023 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <stddef.h>
#include <app/ConcreteCommandPath.h>
#include <lib/core/CHIPError.h>
#include <lib/core/Optional.h>
#include <optional>
namespace chip {
namespace app {
struct CommandPathRegistryEntry
{
ConcreteCommandPath requestPath = ConcreteCommandPath(0, 0, 0);
std::optional<uint16_t> ref;
};
class CommandPathRegistry
{
public:
virtual ~CommandPathRegistry() = default;
virtual std::optional<CommandPathRegistryEntry> Find(const ConcreteCommandPath & requestPath) const = 0;
virtual std::optional<CommandPathRegistryEntry> GetFirstEntry() const = 0;
virtual CHIP_ERROR Add(const ConcreteCommandPath & requestPath, const std::optional<uint16_t> & ref) = 0;
virtual size_t Count() const = 0;
virtual size_t MaxSize() const = 0;
};
/**
* @class BasicCommandPathRegistry
*
* @brief Allows looking up CommandRef using the requested ConcreteCommandPath.
*
* While there are faster implementations, right now batch commands are capped at a low number due to
* message size constraints. All commands need to be contained within a single InvokeRequest. In
* practice this is usually less than 60 commands (but could be much more with TCP transports or
* newer transports).
*/
template <size_t N>
class BasicCommandPathRegistry : public CommandPathRegistry
{
public:
std::optional<CommandPathRegistryEntry> Find(const ConcreteCommandPath & requestPath) const override
{
for (size_t i = 0; i < mCount; i++)
{
if (mTable[i].requestPath == requestPath)
{
return std::make_optional(mTable[i]);
}
}
return std::nullopt;
}
std::optional<CommandPathRegistryEntry> GetFirstEntry() const override
{
if (mCount > 0)
{
return std::make_optional(mTable[0]);
}
return std::nullopt;
}
CHIP_ERROR Add(const ConcreteCommandPath & requestPath, const std::optional<uint16_t> & ref) override
{
if (mCount >= N)
{
return CHIP_ERROR_NO_MEMORY;
}
for (size_t i = 0; i < mCount; i++)
{
if (mTable[i].requestPath == requestPath)
{
return CHIP_ERROR_DUPLICATE_KEY_ID;
}
// No need to check whether either optional has a value: if there is more than
// one entry in the table, all ref values are expected to be entirely unique,
// so a duplicate optional (including two empty ones) means we should error out.
if (mTable[i].ref == ref)
{
return CHIP_ERROR_DUPLICATE_KEY_ID;
}
}
mTable[mCount] = CommandPathRegistryEntry{ requestPath, ref };
mCount++;
return CHIP_NO_ERROR;
}
virtual size_t Count() const override { return mCount; }
virtual size_t MaxSize() const override { return N; }
private:
size_t mCount = 0;
CommandPathRegistryEntry mTable[N];
};
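/**
* Editor's illustrative sketch (not part of the original header): adding and looking up
* an entry in a small registry. The path and CommandRef values are placeholders.
*
* @code
* BasicCommandPathRegistry<4> registry;
* ConcreteCommandPath path(1, 0x0006, 0x01); // endpoint, cluster, command
* CHIP_ERROR err = registry.Add(path, std::make_optional<uint16_t>(7));
* std::optional<CommandPathRegistryEntry> entry = registry.Find(path);
* // On success, entry has a value and entry->ref contains 7.
* @endcode
*/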
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,75 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/CommandHandler.h>
#include <app/ConcreteCommandPath.h>
#include <protocols/interaction_model/Constants.h>
#include <protocols/interaction_model/StatusCode.h>
namespace chip {
namespace app {
template <typename CommandData>
class CommandResponseHelper
{
public:
CommandResponseHelper(app::CommandHandler * command, const app::ConcreteCommandPath & commandPath) :
mCommandHandler(command), mCommandPath(commandPath), mSentResponse(false)
{}
CHIP_ERROR Success(const CommandData & aResponse)
{
mCommandHandler->AddResponse(mCommandPath, aResponse);
mSentResponse = true;
return CHIP_NO_ERROR;
}
CHIP_ERROR Success()
{
CHIP_ERROR err = mCommandHandler->FallibleAddStatus(mCommandPath, Protocols::InteractionModel::Status::Success);
mSentResponse = (err == CHIP_NO_ERROR);
return err;
}
CHIP_ERROR Failure(Protocols::InteractionModel::Status aStatus)
{
CHIP_ERROR err = mCommandHandler->FallibleAddStatus(mCommandPath, aStatus);
mSentResponse = (err == CHIP_NO_ERROR);
return err;
}
CHIP_ERROR Failure(ClusterStatus aClusterStatus)
{
CHIP_ERROR err = mCommandHandler->FallibleAddStatus(
mCommandPath, Protocols::InteractionModel::ClusterStatusCode::ClusterSpecificFailure(aClusterStatus));
mSentResponse = (err == CHIP_NO_ERROR);
return err;
}
bool HasSentResponse() const { return mSentResponse; }
private:
app::CommandHandler * mCommandHandler;
app::ConcreteCommandPath mCommandPath;
bool mSentResponse;
};
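/**
* Editor's illustrative sketch (not part of the original header): using
* CommandResponseHelper inside a cluster command callback. The response type
* Clusters::Sample::Commands::GetInfoResponse::Type and its field are hypothetical.
*
* @code
* void HandleGetInfo(CommandHandler * commandObj, const ConcreteCommandPath & commandPath, bool available)
* {
*     CommandResponseHelper<Clusters::Sample::Commands::GetInfoResponse::Type> helper(commandObj, commandPath);
*     if (!available)
*     {
*         helper.Failure(Protocols::InteractionModel::Status::ResourceExhausted);
*         return;
*     }
*     Clusters::Sample::Commands::GetInfoResponse::Type response;
*     response.version = 1; // hypothetical field
*     helper.Success(response);
* }
* @endcode
*/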
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,264 @@
/*
* Copyright (c) 2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "CommandResponseSender.h"
#include "InteractionModelEngine.h"
#include "messaging/ExchangeContext.h"
namespace chip {
namespace app {
using Status = Protocols::InteractionModel::Status;
CHIP_ERROR CommandResponseSender::OnMessageReceived(Messaging::ExchangeContext * apExchangeContext,
const PayloadHeader & aPayloadHeader, System::PacketBufferHandle && aPayload)
{
CHIP_ERROR err = CHIP_NO_ERROR;
Optional<Status> failureStatusToSend;
if (mState == State::AwaitingStatusResponse &&
aPayloadHeader.HasMessageType(Protocols::InteractionModel::MsgType::StatusResponse))
{
CHIP_ERROR statusError = CHIP_NO_ERROR;
err = StatusResponse::ProcessStatusResponse(std::move(aPayload), statusError);
VerifyOrExit(err == CHIP_NO_ERROR, failureStatusToSend.SetValue(Status::InvalidAction));
err = statusError;
VerifyOrExit(err == CHIP_NO_ERROR, failureStatusToSend.SetValue(Status::InvalidAction));
err = SendCommandResponse();
// If SendCommandResponse() fails, we must close the exchange. We signal the failure to the
// requester with a StatusResponse ('Failure'). Since we're in the middle of processing an
// incoming message, we close the exchange by indicating that we don't expect a further response.
VerifyOrExit(err == CHIP_NO_ERROR, failureStatusToSend.SetValue(Status::Failure));
bool moreToSend = !mChunks.IsNull();
if (!moreToSend)
{
// We are sending the final message and do not anticipate any further responses. We are
// calling ExitNow() to immediately execute Close() and subsequently return from this function.
ExitNow();
}
return CHIP_NO_ERROR;
}
ChipLogDetail(DataManagement, "CommandResponseSender: Unexpected message type %d", aPayloadHeader.GetMessageType());
err = CHIP_ERROR_INVALID_MESSAGE_TYPE;
if (mState != State::AllInvokeResponsesSent)
{
failureStatusToSend.SetValue(Status::Failure);
ExitNow();
}
StatusResponse::Send(Status::InvalidAction, mExchangeCtx.Get(), false /*aExpectResponse*/);
return err;
exit:
if (failureStatusToSend.HasValue())
{
StatusResponse::Send(failureStatusToSend.Value(), mExchangeCtx.Get(), false /*aExpectResponse*/);
}
Close();
return err;
}
void CommandResponseSender::OnResponseTimeout(Messaging::ExchangeContext * apExchangeContext)
{
ChipLogDetail(DataManagement, "CommandResponseSender: Timed out waiting for response from requester mState=[%10.10s]",
GetStateStr());
Close();
}
void CommandResponseSender::StartSendingCommandResponses()
{
VerifyOrDie(mState == State::ReadyForInvokeResponses);
CHIP_ERROR err = SendCommandResponse();
if (err != CHIP_NO_ERROR)
{
ChipLogError(DataManagement, "Failed to send InvokeResponseMessage");
// TODO(#30453): It should be our responsibility to send a Failure StatusResponse to the requestor
// if there is a SessionHandle, but legacy unit tests explicitly check the behavior where
// we do not send any message. Changing this behavior should be done in a standalone
// PR where only that specific change is made. Here is a possible solution that could
// be used that fulfills our responsibility to send a Failure StatusResponse. This causes unit
// tests to start failing.
// ```
// if (mExchangeCtx && mExchangeCtx->HasSessionHandle())
// {
// SendStatusResponse(Status::Failure);
// }
// ```
Close();
return;
}
if (HasMoreToSend())
{
MoveToState(State::AwaitingStatusResponse);
mExchangeCtx->SetDelegate(this);
}
else
{
Close();
}
}
void CommandResponseSender::OnDone(CommandHandlerImpl & apCommandObj)
{
if (mState == State::ErrorSentDelayCloseUntilOnDone)
{
// We have already sent a message to the client indicating that we are not expecting
// a response.
Close();
return;
}
StartSendingCommandResponses();
}
void CommandResponseSender::DispatchCommand(CommandHandlerImpl & apCommandObj, const ConcreteCommandPath & aCommandPath,
TLV::TLVReader & apPayload)
{
VerifyOrReturn(mpCommandHandlerCallback);
mpCommandHandlerCallback->DispatchCommand(apCommandObj, aCommandPath, apPayload);
}
Status CommandResponseSender::CommandExists(const ConcreteCommandPath & aCommandPath)
{
VerifyOrReturnValue(mpCommandHandlerCallback, Protocols::InteractionModel::Status::UnsupportedCommand);
return mpCommandHandlerCallback->CommandExists(aCommandPath);
}
CHIP_ERROR CommandResponseSender::SendCommandResponse()
{
VerifyOrReturnError(HasMoreToSend(), CHIP_ERROR_INCORRECT_STATE);
if (mChunks.IsNull())
{
VerifyOrReturnError(mReportResponseDropped, CHIP_ERROR_INCORRECT_STATE);
SendStatusResponse(Status::ResourceExhausted);
mReportResponseDropped = false;
return CHIP_NO_ERROR;
}
System::PacketBufferHandle commandResponsePayload = mChunks.PopHead();
Messaging::SendFlags sendFlag = Messaging::SendMessageFlags::kNone;
if (HasMoreToSend())
{
sendFlag = Messaging::SendMessageFlags::kExpectResponse;
mExchangeCtx->UseSuggestedResponseTimeout(app::kExpectedIMProcessingTime);
}
ReturnErrorOnFailure(mExchangeCtx->SendMessage(Protocols::InteractionModel::MsgType::InvokeCommandResponse,
std::move(commandResponsePayload), sendFlag));
return CHIP_NO_ERROR;
}
const char * CommandResponseSender::GetStateStr() const
{
#if CHIP_DETAIL_LOGGING
switch (mState)
{
case State::ReadyForInvokeResponses:
return "ReadyForInvokeResponses";
case State::AwaitingStatusResponse:
return "AwaitingStatusResponse";
case State::AllInvokeResponsesSent:
return "AllInvokeResponsesSent";
case State::ErrorSentDelayCloseUntilOnDone:
return "ErrorSentDelayCloseUntilOnDone";
}
#endif // CHIP_DETAIL_LOGGING
return "N/A";
}
void CommandResponseSender::MoveToState(const State aTargetState)
{
if (mState == aTargetState)
{
return;
}
mState = aTargetState;
ChipLogDetail(DataManagement, "Command response sender moving to [%10.10s]", GetStateStr());
}
void CommandResponseSender::Close()
{
MoveToState(State::AllInvokeResponsesSent);
mpCallback->OnDone(*this);
}
void CommandResponseSender::OnInvokeCommandRequest(Messaging::ExchangeContext * ec, System::PacketBufferHandle && payload,
bool isTimedInvoke)
{
VerifyOrDieWithMsg(ec != nullptr, DataManagement, "Incoming exchange context should not be null");
VerifyOrDieWithMsg(mState == State::ReadyForInvokeResponses, DataManagement, "state should be ReadyForInvokeResponses");
// NOTE: we already know this is an InvokeRequestMessage because we explicitly registered with the
// Exchange Manager for unsolicited InvokeRequestMessages.
mExchangeCtx.Grab(ec);
mExchangeCtx->WillSendMessage();
// Grabbing Handle to prevent mCommandHandler from calling OnDone before OnInvokeCommandRequest returns.
// This allows us to send a StatusResponse error instead of any potentially queued up InvokeResponseMessages.
CommandHandler::Handle workHandle(&mCommandHandler);
Status status = mCommandHandler.OnInvokeCommandRequest(*this, std::move(payload), isTimedInvoke);
if (status != Status::Success)
{
VerifyOrDie(mState == State::ReadyForInvokeResponses);
SendStatusResponse(status);
// The API contract of OnInvokeCommandRequest requires the CommandResponder instance to outlive
// the CommandHandler. Therefore, we cannot safely call Close() here, even though we have
// finished sending data. Closing must be deferred until the CommandHandler::OnDone callback.
MoveToState(State::ErrorSentDelayCloseUntilOnDone);
}
}
size_t CommandResponseSender::GetCommandResponseMaxBufferSize()
{
if (!mExchangeCtx || !mExchangeCtx->HasSessionHandle())
{
ChipLogError(DataManagement, "Session not available. Unable to infer session-specific buffer capacities.");
return kMaxSecureSduLengthBytes;
}
if (mExchangeCtx->GetSessionHandle()->AllowsLargePayload())
{
return kMaxLargeSecureSduLengthBytes;
}
return kMaxSecureSduLengthBytes;
}
#if CHIP_WITH_NLFAULTINJECTION
void CommandResponseSender::TestOnlyInvokeCommandRequestWithFaultsInjected(Messaging::ExchangeContext * ec,
System::PacketBufferHandle && payload,
bool isTimedInvoke,
CommandHandlerImpl::NlFaultInjectionType faultType)
{
VerifyOrDieWithMsg(ec != nullptr, DataManagement, "TH Failure: Incoming exchange context should not be null");
VerifyOrDieWithMsg(mState == State::ReadyForInvokeResponses, DataManagement,
"TH Failure: state should be ReadyForInvokeResponses, issue with TH");
mExchangeCtx.Grab(ec);
mExchangeCtx->WillSendMessage();
mCommandHandler.TestOnlyInvokeCommandRequestWithFaultsInjected(*this, std::move(payload), isTimedInvoke, faultType);
}
#endif // CHIP_WITH_NLFAULTINJECTION
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,196 @@
/*
* Copyright (c) 2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/CommandHandlerExchangeInterface.h>
#include <app/CommandHandlerImpl.h>
#include <app/StatusResponse.h>
#include <messaging/ExchangeHolder.h>
#include <system/SystemPacketBuffer.h>
namespace chip {
namespace app {
// TODO(#30453): Rename CommandResponseSender to CommandResponder in follow up PR
/**
* Manages the process of sending InvokeResponseMessage(s) to the requester.
*
* Implements the CommandHandlerExchangeInterface. Uses a CommandHandler member to process
* InvokeCommandRequest. The CommandHandler is provided a reference to this
* CommandHandlerExchangeInterface implementation to enable sending InvokeResponseMessage(s).
*/
class CommandResponseSender : public Messaging::ExchangeDelegate,
public CommandHandlerImpl::Callback,
public CommandHandlerExchangeInterface
{
public:
class Callback
{
public:
virtual ~Callback() = default;
/*
* Signals registered callback that this object has finished its work and can now be
* safely destroyed/released.
*/
virtual void OnDone(CommandResponseSender & apResponderObj) = 0;
};
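// Illustrative sketch (not part of this header; the pool member below is hypothetical):
// the owner of a pool of CommandResponseSender objects would typically implement
// Callback::OnDone by returning the finished responder to that pool, e.g.
//
//   void OnDone(CommandResponseSender & apResponderObj) override
//   {
//       mCommandResponderPool.ReleaseObject(&apResponderObj);
//   }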
CommandResponseSender(Callback * apCallback, CommandHandlerImpl::Callback * apDispatchCallback) :
mpCallback(apCallback), mpCommandHandlerCallback(apDispatchCallback), mCommandHandler(this), mExchangeCtx(*this)
{}
CHIP_ERROR OnMessageReceived(Messaging::ExchangeContext * ec, const PayloadHeader & payloadHeader,
System::PacketBufferHandle && payload) override;
void OnResponseTimeout(Messaging::ExchangeContext * ec) override;
void OnDone(CommandHandlerImpl & apCommandObj) override;
void DispatchCommand(CommandHandlerImpl & apCommandObj, const ConcreteCommandPath & aCommandPath,
TLV::TLVReader & apPayload) override;
Protocols::InteractionModel::Status CommandExists(const ConcreteCommandPath & aCommandPath) override;
/**
* Gets the inner exchange context object, without ownership.
*
* WARNING: This is dangerous, since it is directly interacting with the
* exchange being managed automatically by mExchangeCtx and
* if not done carefully, may end up with use-after-free errors.
*
* @return The inner exchange context, might be nullptr if no
* exchange context has been assigned or the context
* has been released.
*/
Messaging::ExchangeContext * GetExchangeContext() const override { return mExchangeCtx.Get(); }
/**
* Gets subject descriptor of the exchange.
*
* WARNING: This method should only be called when the caller is certain the
* session has not been evicted.
*/
Access::SubjectDescriptor GetSubjectDescriptor() const override
{
VerifyOrDie(mExchangeCtx);
return mExchangeCtx->GetSessionHandle()->GetSubjectDescriptor();
}
FabricIndex GetAccessingFabricIndex() const override
{
VerifyOrDie(mExchangeCtx);
return mExchangeCtx->GetSessionHandle()->GetFabricIndex();
}
Optional<GroupId> GetGroupId() const override
{
VerifyOrDie(mExchangeCtx);
auto sessionHandle = mExchangeCtx->GetSessionHandle();
if (sessionHandle->GetSessionType() != Transport::Session::SessionType::kGroupIncoming)
{
return NullOptional;
}
return MakeOptional(sessionHandle->AsIncomingGroupSession()->GetGroupId());
}
void HandlingSlowCommand() override
{
VerifyOrReturn(mExchangeCtx);
auto * msgContext = mExchangeCtx->GetReliableMessageContext();
VerifyOrReturn(msgContext != nullptr);
msgContext->FlushAcks();
}
void AddInvokeResponseToSend(System::PacketBufferHandle && aPacket) override
{
VerifyOrDie(mState == State::ReadyForInvokeResponses);
mChunks.AddToEnd(std::move(aPacket));
}
void ResponseDropped() override { mReportResponseDropped = true; }
size_t GetCommandResponseMaxBufferSize() override;
/*
* Main entrypoint for this class to handle an invoke request.
*
* isTimedInvoke is true if and only if this is part of a Timed Invoke
* transaction (i.e. was preceded by a Timed Request). If we reach here,
* the timer verification has already been done.
*/
void OnInvokeCommandRequest(Messaging::ExchangeContext * ec, System::PacketBufferHandle && payload, bool isTimedInvoke);
#if CHIP_WITH_NLFAULTINJECTION
/**
* @brief Sends InvokeResponseMessages with injected faults for certification testing.
*
* The Test Harness (TH) uses this to simulate various server response behaviors,
* ensuring the Device Under Test (DUT) handles responses per specification.
*
* This function strictly validates the DUT's InvokeRequestMessage against the test plan.
* If deviations occur, the TH terminates with a detailed error message.
*
* @param ec Exchange context for sending InvokeResponseMessages to the client.
* @param payload Payload of the incoming InvokeRequestMessage from the client.
* @param isTimedInvoke Indicates whether the interaction is timed.
* @param faultType The specific type of fault to inject into the response.
*/
void TestOnlyInvokeCommandRequestWithFaultsInjected(Messaging::ExchangeContext * ec, System::PacketBufferHandle && payload,
bool isTimedInvoke, CommandHandlerImpl::NlFaultInjectionType faultType);
#endif // CHIP_WITH_NLFAULTINJECTION
private:
enum class State : uint8_t
{
ReadyForInvokeResponses, ///< Accepting InvokeResponses to send back to requester.
AwaitingStatusResponse, ///< Awaiting status response from requester, after sending InvokeResponse.
AllInvokeResponsesSent, ///< All InvokeResponses have been sent out.
ErrorSentDelayCloseUntilOnDone ///< We have sent an early error response, but still need to clean up.
};
void MoveToState(const State aTargetState);
const char * GetStateStr() const;
/**
* @brief Initiates the sending of InvokeResponses previously queued using AddInvokeResponseToSend.
*/
void StartSendingCommandResponses();
void SendStatusResponse(Protocols::InteractionModel::Status aStatus)
{
StatusResponse::Send(aStatus, mExchangeCtx.Get(), /*aExpectResponse = */ false);
}
CHIP_ERROR SendCommandResponse();
bool HasMoreToSend() { return !mChunks.IsNull() || mReportResponseDropped; }
void Close();
// A list of InvokeResponseMessages to be sent out by CommandResponseSender.
System::PacketBufferHandle mChunks;
Callback * mpCallback;
CommandHandlerImpl::Callback * mpCommandHandlerCallback;
CommandHandlerImpl mCommandHandler;
Messaging::ExchangeHolder mExchangeCtx;
State mState = State::ReadyForInvokeResponses;
bool mReportResponseDropped = false;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,710 @@
/*
*
* Copyright (c) 2020 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "CommandSender.h"
#include "StatusResponse.h"
#include <app/InteractionModelTimeout.h>
#include <app/TimedRequest.h>
#include <platform/LockTracker.h>
#include <protocols/Protocols.h>
#include <protocols/interaction_model/Constants.h>
namespace chip {
namespace app {
namespace {
// Gets the CommandRef if available. Error returned if we expected CommandRef and it wasn't
// provided in the response.
template <typename ParserT>
CHIP_ERROR GetRef(ParserT aParser, Optional<uint16_t> & aRef, bool commandRefRequired)
{
CHIP_ERROR err = CHIP_NO_ERROR;
uint16_t ref;
err = aParser.GetRef(&ref);
VerifyOrReturnError(err == CHIP_NO_ERROR || err == CHIP_END_OF_TLV, err);
if (err == CHIP_END_OF_TLV)
{
if (commandRefRequired)
{
return CHIP_ERROR_INVALID_ARGUMENT;
}
aRef = NullOptional;
return CHIP_NO_ERROR;
}
aRef = MakeOptional(ref);
return CHIP_NO_ERROR;
}
} // namespace
CommandSender::CommandSender(Callback * apCallback, Messaging::ExchangeManager * apExchangeMgr, bool aIsTimedRequest,
bool aSuppressResponse, bool aAllowLargePayload) :
mExchangeCtx(*this),
mCallbackHandle(apCallback), mpExchangeMgr(apExchangeMgr), mSuppressResponse(aSuppressResponse), mTimedRequest(aIsTimedRequest),
mAllowLargePayload(aAllowLargePayload)
{
assertChipStackLockedByCurrentThread();
}
CommandSender::CommandSender(ExtendableCallback * apExtendableCallback, Messaging::ExchangeManager * apExchangeMgr,
bool aIsTimedRequest, bool aSuppressResponse, bool aAllowLargePayload) :
mExchangeCtx(*this),
mCallbackHandle(apExtendableCallback), mpExchangeMgr(apExchangeMgr), mSuppressResponse(aSuppressResponse),
mTimedRequest(aIsTimedRequest), mUseExtendableCallback(true), mAllowLargePayload(aAllowLargePayload)
{
assertChipStackLockedByCurrentThread();
#if CHIP_CONFIG_COMMAND_SENDER_BUILTIN_SUPPORT_FOR_BATCHED_COMMANDS
mpPendingResponseTracker = &mNonTestPendingResponseTracker;
#endif // CHIP_CONFIG_COMMAND_SENDER_BUILTIN_SUPPORT_FOR_BATCHED_COMMANDS
}
CommandSender::~CommandSender()
{
assertChipStackLockedByCurrentThread();
}
CHIP_ERROR CommandSender::AllocateBuffer()
{
if (!mBufferAllocated)
{
mCommandMessageWriter.Reset();
System::PacketBufferHandle commandPacket;
if (mAllowLargePayload)
{
commandPacket = System::PacketBufferHandle::New(kMaxLargeSecureSduLengthBytes);
}
else
{
commandPacket = System::PacketBufferHandle::New(kMaxSecureSduLengthBytes);
}
VerifyOrReturnError(!commandPacket.IsNull(), CHIP_ERROR_NO_MEMORY);
mCommandMessageWriter.Init(std::move(commandPacket));
ReturnErrorOnFailure(mInvokeRequestBuilder.InitWithEndBufferReserved(&mCommandMessageWriter));
mInvokeRequestBuilder.SuppressResponse(mSuppressResponse).TimedRequest(mTimedRequest);
ReturnErrorOnFailure(mInvokeRequestBuilder.GetError());
mInvokeRequestBuilder.CreateInvokeRequests(/* aReserveEndBuffer = */ true);
ReturnErrorOnFailure(mInvokeRequestBuilder.GetError());
mBufferAllocated = true;
}
return CHIP_NO_ERROR;
}
CHIP_ERROR CommandSender::SendCommandRequestInternal(const SessionHandle & session, Optional<System::Clock::Timeout> timeout)
{
VerifyOrReturnError(mState == State::AddedCommand, CHIP_ERROR_INCORRECT_STATE);
ReturnErrorOnFailure(Finalize(mPendingInvokeData));
// Create a new exchange context.
auto exchange = mpExchangeMgr->NewContext(session, this);
VerifyOrReturnError(exchange != nullptr, CHIP_ERROR_NO_MEMORY);
mExchangeCtx.Grab(exchange);
VerifyOrReturnError(!mExchangeCtx->IsGroupExchangeContext(), CHIP_ERROR_INVALID_MESSAGE_TYPE);
mExchangeCtx->SetResponseTimeout(timeout.ValueOr(session->ComputeRoundTripTimeout(app::kExpectedIMProcessingTime)));
if (mTimedInvokeTimeoutMs.HasValue())
{
ReturnErrorOnFailure(TimedRequest::Send(mExchangeCtx.Get(), mTimedInvokeTimeoutMs.Value()));
MoveToState(State::AwaitingTimedStatus);
return CHIP_NO_ERROR;
}
return SendInvokeRequest();
}
#if CONFIG_BUILD_FOR_HOST_UNIT_TEST
CHIP_ERROR CommandSender::TestOnlyCommandSenderTimedRequestFlagWithNoTimedInvoke(const SessionHandle & session,
Optional<System::Clock::Timeout> timeout)
{
VerifyOrReturnError(mTimedRequest, CHIP_ERROR_INCORRECT_STATE);
return SendCommandRequestInternal(session, timeout);
}
#endif
CHIP_ERROR CommandSender::SendCommandRequest(const SessionHandle & session, Optional<System::Clock::Timeout> timeout)
{
// If the command is expected to be large, ensure that the underlying
// session supports it.
if (mAllowLargePayload)
{
VerifyOrReturnError(session->AllowsLargePayload(), CHIP_ERROR_INCORRECT_STATE);
}
if (mTimedRequest != mTimedInvokeTimeoutMs.HasValue())
{
ChipLogError(
DataManagement,
"Inconsistent timed request state in CommandSender: mTimedRequest (%d) != mTimedInvokeTimeoutMs.HasValue() (%d)",
mTimedRequest, mTimedInvokeTimeoutMs.HasValue());
return CHIP_ERROR_INCORRECT_STATE;
}
return SendCommandRequestInternal(session, timeout);
}
CHIP_ERROR CommandSender::SendGroupCommandRequest(const SessionHandle & session)
{
VerifyOrReturnError(mState == State::AddedCommand, CHIP_ERROR_INCORRECT_STATE);
ReturnErrorOnFailure(Finalize(mPendingInvokeData));
// Create a new exchange context.
auto exchange = mpExchangeMgr->NewContext(session, this);
VerifyOrReturnError(exchange != nullptr, CHIP_ERROR_NO_MEMORY);
mExchangeCtx.Grab(exchange);
VerifyOrReturnError(mExchangeCtx->IsGroupExchangeContext(), CHIP_ERROR_INVALID_MESSAGE_TYPE);
ReturnErrorOnFailure(SendInvokeRequest());
Close();
return CHIP_NO_ERROR;
}
CHIP_ERROR CommandSender::SendInvokeRequest()
{
using namespace Protocols::InteractionModel;
using namespace Messaging;
ReturnErrorOnFailure(
mExchangeCtx->SendMessage(MsgType::InvokeCommandRequest, std::move(mPendingInvokeData), SendMessageFlags::kExpectResponse));
MoveToState(State::AwaitingResponse);
return CHIP_NO_ERROR;
}
CHIP_ERROR CommandSender::OnMessageReceived(Messaging::ExchangeContext * apExchangeContext, const PayloadHeader & aPayloadHeader,
System::PacketBufferHandle && aPayload)
{
using namespace Protocols::InteractionModel;
if (mState == State::AwaitingResponse)
{
MoveToState(State::ResponseReceived);
}
CHIP_ERROR err = CHIP_NO_ERROR;
bool sendStatusResponse = false;
bool moreChunkedMessages = false;
VerifyOrExit(apExchangeContext == mExchangeCtx.Get(), err = CHIP_ERROR_INCORRECT_STATE);
sendStatusResponse = true;
if (mState == State::AwaitingTimedStatus)
{
if (aPayloadHeader.HasMessageType(Protocols::InteractionModel::MsgType::StatusResponse))
{
CHIP_ERROR statusError = CHIP_NO_ERROR;
SuccessOrExit(err = StatusResponse::ProcessStatusResponse(std::move(aPayload), statusError));
sendStatusResponse = false;
SuccessOrExit(err = statusError);
err = SendInvokeRequest();
}
else
{
err = CHIP_ERROR_INVALID_MESSAGE_TYPE;
}
// Skip all other processing here (which is for the response to the
// invoke request), no matter whether err is success or not.
goto exit;
}
if (aPayloadHeader.HasMessageType(MsgType::InvokeCommandResponse))
{
mInvokeResponseMessageCount++;
err = ProcessInvokeResponse(std::move(aPayload), moreChunkedMessages);
SuccessOrExit(err);
if (moreChunkedMessages)
{
StatusResponse::Send(Status::Success, apExchangeContext, /*aExpectResponse = */ true);
MoveToState(State::AwaitingResponse);
return CHIP_NO_ERROR;
}
sendStatusResponse = false;
}
else if (aPayloadHeader.HasMessageType(MsgType::StatusResponse))
{
CHIP_ERROR statusError = CHIP_NO_ERROR;
SuccessOrExit(err = StatusResponse::ProcessStatusResponse(std::move(aPayload), statusError));
SuccessOrExit(err = statusError);
err = CHIP_ERROR_INVALID_MESSAGE_TYPE;
}
else
{
err = CHIP_ERROR_INVALID_MESSAGE_TYPE;
}
exit:
if (err != CHIP_NO_ERROR)
{
OnErrorCallback(err);
}
if (sendStatusResponse)
{
StatusResponse::Send(Status::InvalidAction, apExchangeContext, /*aExpectResponse = */ false);
}
if (mState != State::AwaitingResponse)
{
if (err == CHIP_NO_ERROR)
{
FlushNoCommandResponse();
}
Close();
}
// Else we got a response to a Timed Request and just sent the invoke.
return err;
}
CHIP_ERROR CommandSender::ProcessInvokeResponse(System::PacketBufferHandle && payload, bool & moreChunkedMessages)
{
CHIP_ERROR err = CHIP_NO_ERROR;
System::PacketBufferTLVReader reader;
TLV::TLVReader invokeResponsesReader;
InvokeResponseMessage::Parser invokeResponseMessage;
InvokeResponseIBs::Parser invokeResponses;
bool suppressResponse = false;
reader.Init(std::move(payload));
ReturnErrorOnFailure(invokeResponseMessage.Init(reader));
#if CHIP_CONFIG_IM_PRETTY_PRINT
invokeResponseMessage.PrettyPrint();
#endif
ReturnErrorOnFailure(invokeResponseMessage.GetSuppressResponse(&suppressResponse));
ReturnErrorOnFailure(invokeResponseMessage.GetInvokeResponses(&invokeResponses));
invokeResponses.GetReader(&invokeResponsesReader);
while (CHIP_NO_ERROR == (err = invokeResponsesReader.Next()))
{
VerifyOrReturnError(TLV::AnonymousTag() == invokeResponsesReader.GetTag(), CHIP_ERROR_INVALID_TLV_TAG);
InvokeResponseIB::Parser invokeResponse;
ReturnErrorOnFailure(invokeResponse.Init(invokeResponsesReader));
ReturnErrorOnFailure(ProcessInvokeResponseIB(invokeResponse));
}
err = invokeResponseMessage.GetMoreChunkedMessages(&moreChunkedMessages);
// If the MoreChunkedMessages element is absent, we receive CHIP_END_OF_TLV. In this
// case, per the specification, a default value of false is used.
if (CHIP_END_OF_TLV == err)
{
moreChunkedMessages = false;
err = CHIP_NO_ERROR;
}
ReturnErrorOnFailure(err);
if (suppressResponse && moreChunkedMessages)
{
ChipLogError(DataManagement, "Spec violation! InvokeResponse has suppressResponse=true, and moreChunkedMessages=true");
// TODO Is there a better error to return here?
return CHIP_ERROR_INVALID_TLV_ELEMENT;
}
// if we have exhausted this container
if (CHIP_END_OF_TLV == err)
{
err = CHIP_NO_ERROR;
}
ReturnErrorOnFailure(err);
return invokeResponseMessage.ExitContainer();
}
void CommandSender::OnResponseTimeout(Messaging::ExchangeContext * apExchangeContext)
{
ChipLogProgress(DataManagement, "Time out! failed to receive invoke command response from Exchange: " ChipLogFormatExchange,
ChipLogValueExchange(apExchangeContext));
OnErrorCallback(CHIP_ERROR_TIMEOUT);
Close();
}
void CommandSender::FlushNoCommandResponse()
{
if (mpPendingResponseTracker && mUseExtendableCallback && mCallbackHandle.extendableCallback)
{
Optional<uint16_t> commandRef = mpPendingResponseTracker->PopPendingResponse();
while (commandRef.HasValue())
{
NoResponseData noResponseData = { commandRef.Value() };
mCallbackHandle.extendableCallback->OnNoResponse(this, noResponseData);
commandRef = mpPendingResponseTracker->PopPendingResponse();
}
}
}
void CommandSender::Close()
{
mSuppressResponse = false;
mTimedRequest = false;
MoveToState(State::AwaitingDestruction);
OnDoneCallback();
}
CHIP_ERROR CommandSender::ProcessInvokeResponseIB(InvokeResponseIB::Parser & aInvokeResponse)
{
CHIP_ERROR err = CHIP_NO_ERROR;
ClusterId clusterId;
CommandId commandId;
EndpointId endpointId;
// Default to success when an invoke response is received.
StatusIB statusIB;
{
bool hasDataResponse = false;
TLV::TLVReader commandDataReader;
Optional<uint16_t> commandRef;
bool commandRefRequired = (mFinishedCommandCount > 1);
CommandStatusIB::Parser commandStatus;
err = aInvokeResponse.GetStatus(&commandStatus);
if (CHIP_NO_ERROR == err)
{
CommandPathIB::Parser commandPath;
ReturnErrorOnFailure(commandStatus.GetPath(&commandPath));
ReturnErrorOnFailure(commandPath.GetClusterId(&clusterId));
ReturnErrorOnFailure(commandPath.GetCommandId(&commandId));
ReturnErrorOnFailure(commandPath.GetEndpointId(&endpointId));
StatusIB::Parser status;
commandStatus.GetErrorStatus(&status);
ReturnErrorOnFailure(status.DecodeStatusIB(statusIB));
ReturnErrorOnFailure(GetRef(commandStatus, commandRef, commandRefRequired));
}
else if (CHIP_END_OF_TLV == err)
{
CommandDataIB::Parser commandData;
CommandPathIB::Parser commandPath;
ReturnErrorOnFailure(aInvokeResponse.GetCommand(&commandData));
ReturnErrorOnFailure(commandData.GetPath(&commandPath));
ReturnErrorOnFailure(commandPath.GetEndpointId(&endpointId));
ReturnErrorOnFailure(commandPath.GetClusterId(&clusterId));
ReturnErrorOnFailure(commandPath.GetCommandId(&commandId));
commandData.GetFields(&commandDataReader);
ReturnErrorOnFailure(GetRef(commandData, commandRef, commandRefRequired));
err = CHIP_NO_ERROR;
hasDataResponse = true;
}
if (err != CHIP_NO_ERROR)
{
ChipLogError(DataManagement, "Received malformed Command Response, err=%" CHIP_ERROR_FORMAT, err.Format());
}
else
{
if (hasDataResponse)
{
ChipLogProgress(DataManagement,
"Received Command Response Data, Endpoint=%u Cluster=" ChipLogFormatMEI
" Command=" ChipLogFormatMEI,
endpointId, ChipLogValueMEI(clusterId), ChipLogValueMEI(commandId));
}
else
{
ChipLogProgress(DataManagement,
"Received Command Response Status for Endpoint=%u Cluster=" ChipLogFormatMEI
" Command=" ChipLogFormatMEI " Status=0x%x",
endpointId, ChipLogValueMEI(clusterId), ChipLogValueMEI(commandId),
to_underlying(statusIB.mStatus));
}
}
ReturnErrorOnFailure(err);
if (commandRef.HasValue() && mpPendingResponseTracker != nullptr)
{
err = mpPendingResponseTracker->Remove(commandRef.Value());
if (err != CHIP_NO_ERROR)
{
// This can happen for two reasons:
// 1. The current InvokeResponse is a duplicate (based on its commandRef).
// 2. The current InvokeResponse is for a request we never sent (based on its commandRef).
// Used when logging errors related to server violating spec.
[[maybe_unused]] ScopedNodeId remoteScopedNode;
if (mExchangeCtx.Get() && mExchangeCtx.Get()->HasSessionHandle())
{
remoteScopedNode = mExchangeCtx.Get()->GetSessionHandle()->GetPeer();
}
ChipLogError(DataManagement,
"Received Unexpected Response from remote node " ChipLogFormatScopedNodeId ", commandRef=%u",
ChipLogValueScopedNodeId(remoteScopedNode), commandRef.Value());
return err;
}
}
if (!commandRef.HasValue() && !commandRefRequired && mpPendingResponseTracker != nullptr &&
mpPendingResponseTracker->Count() == 1)
{
// We have sent out a single invoke request. As per the spec, the server in this case doesn't need to provide the CommandRef
// in the response. This is allowed to support communicating with a legacy server. In this case we assume the response
// is associated with the only command we sent out.
commandRef = mpPendingResponseTracker->PopPendingResponse();
}
// When using ExtendableCallbacks, we are adhering to a different API contract where path
// specific errors are sent to the OnResponse callback. For more information on the history
// of this issue please see https://github.com/project-chip/connectedhomeip/issues/30991
if (statusIB.IsSuccess() || mUseExtendableCallback)
{
const ConcreteCommandPath concretePath = ConcreteCommandPath(endpointId, clusterId, commandId);
ResponseData responseData = { concretePath, statusIB };
responseData.data = hasDataResponse ? &commandDataReader : nullptr;
responseData.commandRef = commandRef;
OnResponseCallback(responseData);
}
else
{
OnErrorCallback(statusIB.ToChipError());
}
}
return CHIP_NO_ERROR;
}
CHIP_ERROR CommandSender::SetCommandSenderConfig(CommandSender::ConfigParameters & aConfigParams)
{
VerifyOrReturnError(mState == State::Idle, CHIP_ERROR_INCORRECT_STATE);
VerifyOrReturnError(aConfigParams.remoteMaxPathsPerInvoke > 0, CHIP_ERROR_INVALID_ARGUMENT);
if (mpPendingResponseTracker != nullptr)
{
mRemoteMaxPathsPerInvoke = aConfigParams.remoteMaxPathsPerInvoke;
mBatchCommandsEnabled = (aConfigParams.remoteMaxPathsPerInvoke > 1);
}
else
{
VerifyOrReturnError(aConfigParams.remoteMaxPathsPerInvoke == 1, CHIP_ERROR_UNSUPPORTED_CHIP_FEATURE);
}
return CHIP_NO_ERROR;
}
CHIP_ERROR CommandSender::PrepareCommand(const CommandPathParams & aCommandPathParams,
PrepareCommandParameters & aPrepareCommandParams)
{
ReturnErrorOnFailure(AllocateBuffer());
//
// We must not be in the middle of preparing a command, and must not have already sent InvokeRequestMessage.
//
bool canAddAnotherCommand = (mState == State::AddedCommand && mBatchCommandsEnabled && mUseExtendableCallback);
VerifyOrReturnError(mState == State::Idle || canAddAnotherCommand, CHIP_ERROR_INCORRECT_STATE);
VerifyOrReturnError(mFinishedCommandCount < mRemoteMaxPathsPerInvoke, CHIP_ERROR_MAXIMUM_PATHS_PER_INVOKE_EXCEEDED);
if (mBatchCommandsEnabled)
{
VerifyOrReturnError(mpPendingResponseTracker != nullptr, CHIP_ERROR_INCORRECT_STATE);
VerifyOrReturnError(aPrepareCommandParams.commandRef.HasValue(), CHIP_ERROR_INVALID_ARGUMENT);
uint16_t commandRef = aPrepareCommandParams.commandRef.Value();
VerifyOrReturnError(!mpPendingResponseTracker->IsTracked(commandRef), CHIP_ERROR_INVALID_ARGUMENT);
}
InvokeRequests::Builder & invokeRequests = mInvokeRequestBuilder.GetInvokeRequests();
CommandDataIB::Builder & invokeRequest = invokeRequests.CreateCommandData();
ReturnErrorOnFailure(invokeRequests.GetError());
CommandPathIB::Builder & path = invokeRequest.CreatePath();
ReturnErrorOnFailure(invokeRequest.GetError());
ReturnErrorOnFailure(path.Encode(aCommandPathParams));
if (aPrepareCommandParams.startDataStruct)
{
ReturnErrorOnFailure(invokeRequest.GetWriter()->StartContainer(TLV::ContextTag(CommandDataIB::Tag::kFields),
TLV::kTLVType_Structure, mDataElementContainerType));
}
MoveToState(State::AddingCommand);
return CHIP_NO_ERROR;
}
CHIP_ERROR CommandSender::FinishCommand(FinishCommandParameters & aFinishCommandParams)
{
if (mBatchCommandsEnabled)
{
VerifyOrReturnError(mpPendingResponseTracker != nullptr, CHIP_ERROR_INCORRECT_STATE);
VerifyOrReturnError(aFinishCommandParams.commandRef.HasValue(), CHIP_ERROR_INVALID_ARGUMENT);
uint16_t commandRef = aFinishCommandParams.commandRef.Value();
VerifyOrReturnError(!mpPendingResponseTracker->IsTracked(commandRef), CHIP_ERROR_INVALID_ARGUMENT);
}
return FinishCommandInternal(aFinishCommandParams);
}
CHIP_ERROR CommandSender::AddRequestData(const CommandPathParams & aCommandPath, const DataModel::EncodableToTLV & aEncodable,
AddRequestDataParameters & aAddRequestDataParams)
{
ReturnErrorOnFailure(AllocateBuffer());
RollbackInvokeRequest rollback(*this);
PrepareCommandParameters prepareCommandParams(aAddRequestDataParams);
ReturnErrorOnFailure(PrepareCommand(aCommandPath, prepareCommandParams));
TLV::TLVWriter * writer = GetCommandDataIBTLVWriter();
VerifyOrReturnError(writer != nullptr, CHIP_ERROR_INCORRECT_STATE);
ReturnErrorOnFailure(aEncodable.EncodeTo(*writer, TLV::ContextTag(CommandDataIB::Tag::kFields)));
FinishCommandParameters finishCommandParams(aAddRequestDataParams);
ReturnErrorOnFailure(FinishCommand(finishCommandParams));
rollback.DisableAutomaticRollback();
return CHIP_NO_ERROR;
}
CHIP_ERROR CommandSender::FinishCommandInternal(FinishCommandParameters & aFinishCommandParams)
{
CHIP_ERROR err = CHIP_NO_ERROR;
VerifyOrReturnError(mState == State::AddingCommand, err = CHIP_ERROR_INCORRECT_STATE);
CommandDataIB::Builder & commandData = mInvokeRequestBuilder.GetInvokeRequests().GetCommandData();
if (aFinishCommandParams.endDataStruct)
{
ReturnErrorOnFailure(commandData.GetWriter()->EndContainer(mDataElementContainerType));
}
if (aFinishCommandParams.commandRef.HasValue())
{
ReturnErrorOnFailure(commandData.Ref(aFinishCommandParams.commandRef.Value()));
}
ReturnErrorOnFailure(commandData.EndOfCommandDataIB());
MoveToState(State::AddedCommand);
mFinishedCommandCount++;
if (mpPendingResponseTracker && aFinishCommandParams.commandRef.HasValue())
{
mpPendingResponseTracker->Add(aFinishCommandParams.commandRef.Value());
}
if (aFinishCommandParams.timedInvokeTimeoutMs.HasValue())
{
SetTimedInvokeTimeoutMs(aFinishCommandParams.timedInvokeTimeoutMs);
}
return CHIP_NO_ERROR;
}
TLV::TLVWriter * CommandSender::GetCommandDataIBTLVWriter()
{
if (mState != State::AddingCommand)
{
return nullptr;
}
return mInvokeRequestBuilder.GetInvokeRequests().GetCommandData().GetWriter();
}
void CommandSender::SetTimedInvokeTimeoutMs(const Optional<uint16_t> & aTimedInvokeTimeoutMs)
{
if (!mTimedInvokeTimeoutMs.HasValue())
{
mTimedInvokeTimeoutMs = aTimedInvokeTimeoutMs;
}
else if (aTimedInvokeTimeoutMs.HasValue())
{
uint16_t newValue = std::min(mTimedInvokeTimeoutMs.Value(), aTimedInvokeTimeoutMs.Value());
mTimedInvokeTimeoutMs.SetValue(newValue);
}
}
size_t CommandSender::GetInvokeResponseMessageCount()
{
return static_cast<size_t>(mInvokeResponseMessageCount);
}
CHIP_ERROR CommandSender::Finalize(System::PacketBufferHandle & commandPacket)
{
VerifyOrReturnError(mState == State::AddedCommand, CHIP_ERROR_INCORRECT_STATE);
ReturnErrorOnFailure(mInvokeRequestBuilder.GetInvokeRequests().EndOfInvokeRequests());
ReturnErrorOnFailure(mInvokeRequestBuilder.EndOfInvokeRequestMessage());
return mCommandMessageWriter.Finalize(&commandPacket);
}
const char * CommandSender::GetStateStr() const
{
#if CHIP_DETAIL_LOGGING
switch (mState)
{
case State::Idle:
return "Idle";
case State::AddingCommand:
return "AddingCommand";
case State::AddedCommand:
return "AddedCommand";
case State::AwaitingTimedStatus:
return "AwaitingTimedStatus";
case State::AwaitingResponse:
return "AwaitingResponse";
case State::ResponseReceived:
return "ResponseReceived";
case State::AwaitingDestruction:
return "AwaitingDestruction";
}
#endif // CHIP_DETAIL_LOGGING
return "N/A";
}
void CommandSender::MoveToState(const State aTargetState)
{
mState = aTargetState;
ChipLogDetail(DataManagement, "ICR moving to [%10.10s]", GetStateStr());
}
CommandSender::RollbackInvokeRequest::RollbackInvokeRequest(CommandSender & aCommandSender) : mCommandSender(aCommandSender)
{
VerifyOrReturn(mCommandSender.mBufferAllocated);
VerifyOrReturn(mCommandSender.mState == State::Idle || mCommandSender.mState == State::AddedCommand);
VerifyOrReturn(mCommandSender.mInvokeRequestBuilder.GetInvokeRequests().GetError() == CHIP_NO_ERROR);
VerifyOrReturn(mCommandSender.mInvokeRequestBuilder.GetError() == CHIP_NO_ERROR);
mCommandSender.mInvokeRequestBuilder.Checkpoint(mBackupWriter);
mBackupState = mCommandSender.mState;
mRollbackInDestructor = true;
}
CommandSender::RollbackInvokeRequest::~RollbackInvokeRequest()
{
VerifyOrReturn(mRollbackInDestructor);
VerifyOrReturn(mCommandSender.mState == State::AddingCommand);
ChipLogDetail(DataManagement, "Rolling back response");
// TODO(#30453): Rollback of mInvokeRequestBuilder should handle resetting
// InvokeRequests.
mCommandSender.mInvokeRequestBuilder.GetInvokeRequests().ResetError();
mCommandSender.mInvokeRequestBuilder.Rollback(mBackupWriter);
mCommandSender.MoveToState(mBackupState);
mRollbackInDestructor = false;
}
void CommandSender::RollbackInvokeRequest::DisableAutomaticRollback()
{
mRollbackInDestructor = false;
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,678 @@
/*
*
* Copyright (c) 2020 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* This file defines objects for a CHIP IM Invoke Command Sender
*
*/
#pragma once
#include <type_traits>
#include "CommandSenderLegacyCallback.h"
#include <app/CommandPathParams.h>
#include <app/MessageDef/InvokeRequestMessage.h>
#include <app/MessageDef/InvokeResponseMessage.h>
#include <app/MessageDef/StatusIB.h>
#include <app/PendingResponseTrackerImpl.h>
#include <app/data-model/EncodableToTLV.h>
#include <app/data-model/Encode.h>
#include <lib/core/CHIPCore.h>
#include <lib/core/Optional.h>
#include <lib/core/TLVDebug.h>
#include <lib/support/BitFlags.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/DLLUtil.h>
#include <lib/support/logging/CHIPLogging.h>
#include <messaging/ExchangeHolder.h>
#include <messaging/ExchangeMgr.h>
#include <messaging/Flags.h>
#include <protocols/Protocols.h>
#include <system/SystemPacketBuffer.h>
#include <system/TLVPacketBufferBackingStore.h>
#define COMMON_STATUS_SUCCESS 0
namespace chip {
namespace app {
class CommandSender final : public Messaging::ExchangeDelegate
{
public:
// CommandSender::ExtendableCallback::OnResponse is public SDK API, so we cannot break
// source compatibility for it. To allow for additional values to be added at a future
// time without constantly changing the function's declaration parameter list, we are
// defining the struct ResponseData and adding that to the parameter list to allow for
// future extendability.
struct ResponseData
{
// The command path field in invoke command response.
const ConcreteCommandPath & path;
// The status of the command. It can be any success status, including possibly a cluster-specific one.
// If `data` is not null, statusIB will always be a generic SUCCESS status with no cluster-specific
// information.
const StatusIB & statusIB;
// The command data, will be nullptr if the server returns a StatusIB.
TLV::TLVReader * data;
// Reference for the command. This should be associated with the reference value sent out in the initial
// invoke request.
Optional<uint16_t> commandRef;
};
// CommandSender::ExtendableCallback::OnNoResponse is public SDK API, so we cannot break
// source compatibility for it. To allow for additional values to be added at a future
// time without constantly changing the function's declaration parameter list, we are
// defining the struct NoResponseData and adding that to the parameter list to allow for
// future extendability.
struct NoResponseData
{
uint16_t commandRef;
};
// CommandSender::ExtendableCallback::OnError is public SDK API, so we cannot break source
// compatibility for it. To allow for additional values to be added at a future time
// without constantly changing the function's declaration parameter list, we are
// defining the struct ErrorData and adding that to the parameter list
// to allow for future extendability.
struct ErrorData
{
/**
* The following errors will be delivered through `error`
*
* - CHIP_ERROR_TIMEOUT: A response was not received within the expected response timeout.
* - CHIP_ERROR_*TLV*: A malformed, non-compliant response was received from the server.
* - CHIP_ERROR encapsulating a StatusIB: If we got a non-path-specific
* status response from the server. In that case, constructing
* a StatusIB from the error can be used to extract the status.
* - CHIP_ERROR*: All other cases.
*/
CHIP_ERROR error;
};
/**
* @brief Callback that is extendable for future features, starting with batch commands
*
* The two major differences between ExtendableCallback and Callback are:
* 1. Path-specific errors go to OnResponse instead of OnError
* - Note: Non-path-specific errors still go to OnError.
* 2. Instead of having new parameters at the end of the arguments list, with defaults,
* as functionality expands, a parameter whose type is defined in this header is used
* as the argument to the callbacks
*
* To support batch commands, the client must use ExtendableCallback.
*/
class ExtendableCallback
{
public:
virtual ~ExtendableCallback() = default;
/**
* OnResponse will be called for all path specific responses from the server that have been received
* and processed. Specifically:
* - When a status code is received and it is IM::Success, aData will be nullptr.
* - When a status code is received and it is an IM error and/or a cluster-specific error, aData will be nullptr.
* - These kinds of errors are referred to as path-specific errors.
* - When a data response is received, aData will point to a valid TLVReader initialized to point at the struct container
* that contains the data payload (callee will still need to open and process the container).
*
* The CommandSender object MUST continue to exist after this call is completed. The application shall wait until it
* receives an OnDone call to destroy the object.
*
* @param[in] commandSender The command sender object that initiated the command transaction.
* @param[in] aResponseData Information pertaining to the response.
*/
virtual void OnResponse(CommandSender * commandSender, const ResponseData & aResponseData) {}
/**
* Called for each request that failed to receive a response after the server indicates completion of all requests.
*
* This callback may be omitted if clients have alternative ways to track non-responses.
*
* The CommandSender object MUST continue to exist after this call is completed. The application shall wait until it
* receives an OnDone call to destroy the object.
*
* @param commandSender The CommandSender object that initiated the transaction.
* @param aNoResponseData Details about the request without a response.
*/
virtual void OnNoResponse(CommandSender * commandSender, const NoResponseData & aNoResponseData) {}
/**
* OnError will be called when a non-path-specific error occurs *after* a successful call to SendCommandRequest().
*
* The CommandSender object MUST continue to exist after this call is completed. The application shall wait until it
* receives an OnDone call to destroy and free the object.
*
* NOTE: Path specific errors do NOT come to OnError, but instead go to OnResponse.
*
* @param[in] apCommandSender The command sender object that initiated the command transaction.
* @param[in] aErrorData Error data describing the error that occurred.
*/
virtual void OnError(const CommandSender * apCommandSender, const ErrorData & aErrorData) {}
/**
* OnDone will be called when CommandSender has finished all work and is safe to destroy and free the
* allocated CommandSender object.
*
* This function will:
* - Always be called exactly *once* for a given CommandSender instance.
* - Be called even in error circumstances.
* - Only be called after a successful call to SendCommandRequest returns, if SendCommandRequest is used.
* - Always be called before a successful return from SendGroupCommandRequest, if SendGroupCommandRequest is used.
*
* This function must be implemented to destroy the CommandSender object.
*
* @param[in] apCommandSender The command sender object of the terminated invoke command transaction.
*/
virtual void OnDone(CommandSender * apCommandSender) = 0;
};
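// Illustrative sketch (not part of this header): a minimal ExtendableCallback
// implementation that logs each response, reports failures, and releases the
// sender once OnDone fires. The class name and the use of Platform::Delete for
// lifetime management are assumptions, not a prescribed pattern.
//
//   class MyInvokeCallback : public CommandSender::ExtendableCallback
//   {
//   public:
//       void OnResponse(CommandSender * sender, const ResponseData & aData) override
//       {
//           // Path-specific errors also land here; inspect aData.statusIB.
//           ChipLogProgress(DataManagement, "Invoke response status=0x%x", to_underlying(aData.statusIB.mStatus));
//       }
//       void OnError(const CommandSender * sender, const ErrorData & aError) override
//       {
//           ChipLogError(DataManagement, "Invoke failed: %" CHIP_ERROR_FORMAT, aError.error.Format());
//       }
//       void OnDone(CommandSender * sender) override { Platform::Delete(sender); }
//   };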
// `Callback` exists for legacy purposes. If you are developing a new callback implementation,
// please use `ExtendableCallback`.
using Callback = CommandSenderLegacyCallback;
// SetCommandSenderConfig is a public SDK API, so we cannot break source compatibility
// for it. By having parameters to that API use this struct instead of individual
// function arguments, we centralize required changes to one file when adding new
// functionality.
struct ConfigParameters
{
ConfigParameters & SetRemoteMaxPathsPerInvoke(uint16_t aRemoteMaxPathsPerInvoke)
{
remoteMaxPathsPerInvoke = aRemoteMaxPathsPerInvoke;
return *this;
}
// If remoteMaxPathsPerInvoke is 1, the CommandSender client is allowed to contain only one command and the
// other batch-command requirements are not enforced.
uint16_t remoteMaxPathsPerInvoke = 1;
};
// AddRequestData is a public SDK API, so we must maintain source compatibility.
// Using this struct for API parameters instead of individual parameters allows us
// to make necessary changes for new functionality in a single location.
struct AddRequestDataParameters
{
// gcc bug requires us to have the constructor below
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96645
AddRequestDataParameters() {}
AddRequestDataParameters(const Optional<uint16_t> & aTimedInvokeTimeoutMs) : timedInvokeTimeoutMs(aTimedInvokeTimeoutMs) {}
AddRequestDataParameters & SetCommandRef(uint16_t aCommandRef)
{
commandRef.SetValue(aCommandRef);
return *this;
}
// When a value is provided for timedInvokeTimeoutMs, this invoke becomes a timed
// invoke. CommandSender will use the minimum of all provided timeouts for execution.
const Optional<uint16_t> timedInvokeTimeoutMs;
// The command reference is required when sending multiple commands. It allows the caller
// to associate this request with its corresponding response.
Optional<uint16_t> commandRef;
};
// PrepareCommand is a public SDK API, so we must maintain source compatibility.
// Using this struct for API parameters instead of individual parameters allows us
// to make necessary changes for new functionality in a single location.
struct PrepareCommandParameters
{
// gcc bug requires us to have the constructor below
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96645
PrepareCommandParameters() {}
PrepareCommandParameters(const AddRequestDataParameters & aAddRequestDataParam) :
commandRef(aAddRequestDataParam.commandRef)
{}
PrepareCommandParameters & SetStartDataStruct(bool aStartDataStruct)
{
startDataStruct = aStartDataStruct;
return *this;
}
PrepareCommandParameters & SetCommandRef(uint16_t aCommandRef)
{
commandRef.SetValue(aCommandRef);
return *this;
}
// The command reference is required when sending multiple commands. It allows the caller
// to associate this request with its corresponding response. We validate the reference
// early in PrepareCommand, even though it's not used until FinishCommand. This proactive
// validation helps prevent unnecessary writing an InvokeRequest into the packet that later
// needs to be undone.
Optional<uint16_t> commandRef;
// If the InvokeRequest needs to be in a state with a started data TLV struct container
bool startDataStruct = false;
};
// FinishCommand is a public SDK API, so we must maintain source compatibility.
// Using this struct for API parameters instead of individual parameters allows us
// to make necessary changes for new functionality in a single location.
struct FinishCommandParameters
{
// gcc bug requires us to have the constructor below
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=96645
FinishCommandParameters() {}
FinishCommandParameters(const Optional<uint16_t> & aTimedInvokeTimeoutMs) : timedInvokeTimeoutMs(aTimedInvokeTimeoutMs) {}
FinishCommandParameters(const AddRequestDataParameters & aAddRequestDataParam) :
timedInvokeTimeoutMs(aAddRequestDataParam.timedInvokeTimeoutMs), commandRef(aAddRequestDataParam.commandRef)
{}
FinishCommandParameters & SetEndDataStruct(bool aEndDataStruct)
{
endDataStruct = aEndDataStruct;
return *this;
}
FinishCommandParameters & SetCommandRef(uint16_t aCommandRef)
{
commandRef.SetValue(aCommandRef);
return *this;
}
// When a value is provided for timedInvokeTimeoutMs, this invoke becomes a timed
// invoke. CommandSender will use the minimum of all provided timeouts for execution.
const Optional<uint16_t> timedInvokeTimeoutMs;
// The command reference is required when sending multiple commands. It allows the caller
// to associate this request with its corresponding response. This value must be
// the same as the one provided in PrepareCommandParameters when calling PrepareCommand.
Optional<uint16_t> commandRef;
// If InvokeRequest is in a state where the data TLV struct container is currently open
// and FinishCommand should close it.
bool endDataStruct = false;
};
class TestOnlyMarker
{
};
/*
* Constructor.
*
* The callback passed in has to outlive this CommandSender object.
* If used in a group setting, callbacks do not need to be passed.
* If callbacks are passed, the only one that will be called in a group setting is OnDone.
*/
CommandSender(Callback * apCallback, Messaging::ExchangeManager * apExchangeMgr, bool aIsTimedRequest = false,
bool aSuppressResponse = false, bool aAllowLargePayload = false);
CommandSender(std::nullptr_t, Messaging::ExchangeManager * apExchangeMgr, bool aIsTimedRequest = false,
bool aSuppressResponse = false, bool aAllowLargePayload = false) :
CommandSender(static_cast<Callback *>(nullptr), apExchangeMgr, aIsTimedRequest, aSuppressResponse, aAllowLargePayload)
{}
CommandSender(ExtendableCallback * apCallback, Messaging::ExchangeManager * apExchangeMgr, bool aIsTimedRequest = false,
bool aSuppressResponse = false, bool aAllowLargePayload = false);
// TODO(#32138): After there is a macro that is always defined for all unit tests, the constructor with
// TestOnlyMarker should only be compiled if that macro is defined.
CommandSender(TestOnlyMarker aTestMarker, ExtendableCallback * apCallback, Messaging::ExchangeManager * apExchangeMgr,
PendingResponseTracker * apPendingResponseTracker, bool aIsTimedRequest = false, bool aSuppressResponse = false,
bool aAllowLargePayload = false) :
CommandSender(apCallback, apExchangeMgr, aIsTimedRequest, aSuppressResponse, aAllowLargePayload)
{
mpPendingResponseTracker = apPendingResponseTracker;
}
~CommandSender();
/**
* Enables additional features of CommandSender, for example sending batch commands.
*
* In the case of enabling batch commands, once set it ensures that commands contain all
* required data elements while building the InvokeRequestMessage. This must be called
* before PrepareCommand.
*
* @param [in] aConfigParams contains information to configure CommandSender behavior,
* such as allowing a max number of paths per invoke greater than one,
* based on how many paths the remote peer claims to support.
*
* @return CHIP_ERROR_INCORRECT_STATE
* If the device has previously called `PrepareCommand`.
* @return CHIP_ERROR_INVALID_ARGUMENT
* Invalid argument value.
* @return CHIP_ERROR_UNSUPPORTED_CHIP_FEATURE
* Device has not enabled batch command support. To enable:
* 1. Enable the CHIP_CONFIG_COMMAND_SENDER_BUILTIN_SUPPORT_FOR_BATCHED_COMMANDS
* configuration option.
* 2. Ensure you provide ExtendableCallback.
*/
CHIP_ERROR SetCommandSenderConfig(ConfigParameters & aConfigParams);
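// Usage sketch (illustrative): enabling batch commands before preparing any
// command. `remoteMaxPaths` is an assumed value obtained from the remote peer,
// e.g. its advertised maximum paths per invoke.
//
//   CommandSender::ConfigParameters config;
//   config.SetRemoteMaxPathsPerInvoke(remoteMaxPaths);
//   ReturnErrorOnFailure(sender.SetCommandSenderConfig(config));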
CHIP_ERROR PrepareCommand(const CommandPathParams & aCommandPathParams, PrepareCommandParameters & aPrepareCommandParams);
[[deprecated("PrepareCommand should migrate to calling PrepareCommand with PrepareCommandParameters")]] CHIP_ERROR
PrepareCommand(const CommandPathParams & aCommandPathParams, bool aStartDataStruct = true)
{
PrepareCommandParameters prepareCommandParams;
prepareCommandParams.SetStartDataStruct(aStartDataStruct);
return PrepareCommand(aCommandPathParams, prepareCommandParams);
}
CHIP_ERROR FinishCommand(FinishCommandParameters & aFinishCommandParams);
[[deprecated("FinishCommand should migrate to calling FinishCommand with FinishCommandParameters")]] CHIP_ERROR
FinishCommand(bool aEndDataStruct = true)
{
FinishCommandParameters finishCommandParams;
finishCommandParams.SetEndDataStruct(aEndDataStruct);
return FinishCommand(finishCommandParams);
}
[[deprecated("FinishCommand should migrate to calling FinishCommand with FinishCommandParameters")]] CHIP_ERROR
FinishCommand(const Optional<uint16_t> & aTimedInvokeTimeoutMs)
{
FinishCommandParameters finishCommandParams(aTimedInvokeTimeoutMs);
return FinishCommand(finishCommandParams);
}
TLV::TLVWriter * GetCommandDataIBTLVWriter();
/**
* API for adding request data using DataModel::EncodableToTLV.
*
* @param [in] aCommandPath The path of the command being requested.
* @param [in] aEncodable The request data to encode into the
* `CommandFields` member of `CommandDataIB`.
* @param [in] aAddRequestDataParams parameters associated with building the
* InvokeRequestMessage that are associated with this request.
*
* This API will not fail if this is an untimed invoke but the command provided requires a timed
* invoke interaction. If the caller wants that to fail before sending the command, they should call
* the templated version of AddRequestData.
*/
CHIP_ERROR AddRequestData(const CommandPathParams & aCommandPath, const DataModel::EncodableToTLV & aEncodable,
AddRequestDataParameters & aAddRequestDataParams);
/**
* API for adding a data request. The template parameter T is generally
* expected to be a ClusterName::Commands::CommandName::Type struct, but any
* object that can be encoded using the DataModel::Encode machinery and
* exposes the right command id will work.
*
* @param [in] aCommandPath The path of the command being requested.
* @param [in] aData The data for the request.
*/
template <typename CommandDataT, typename std::enable_if_t<!CommandDataT::MustUseTimedInvoke(), int> = 0>
CHIP_ERROR AddRequestData(const CommandPathParams & aCommandPath, const CommandDataT & aData)
{
AddRequestDataParameters addRequestDataParams;
return AddRequestData(aCommandPath, aData, addRequestDataParams);
}
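// Usage sketch (illustrative): adding a request for a generated command type.
// Clusters::OnOff::Commands::Toggle is used only as an example; any
// ClusterName::Commands::CommandName::Type that does not require a timed invoke
// works the same way.
//
//   CommandPathParams path(endpointId, /* aGroupId = */ 0, Clusters::OnOff::Id,
//                          Clusters::OnOff::Commands::Toggle::Id, CommandPathFlags::kEndpointIdValid);
//   Clusters::OnOff::Commands::Toggle::Type toggleRequest;
//   ReturnErrorOnFailure(sender.AddRequestData(path, toggleRequest));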
template <typename CommandDataT,
typename std::enable_if_t<!std::is_base_of_v<DataModel::EncodableToTLV, CommandDataT>, int> = 0>
CHIP_ERROR AddRequestData(const CommandPathParams & aCommandPath, const CommandDataT & aData,
AddRequestDataParameters & aAddRequestDataParams)
{
VerifyOrReturnError(!CommandDataT::MustUseTimedInvoke() || aAddRequestDataParams.timedInvokeTimeoutMs.HasValue(),
CHIP_ERROR_INVALID_ARGUMENT);
DataModel::EncodableType<CommandDataT> encodable(aData);
return AddRequestData(aCommandPath, encodable, aAddRequestDataParams);
}
template <typename CommandDataT>
CHIP_ERROR AddRequestData(const CommandPathParams & aCommandPath, const CommandDataT & aData,
const Optional<uint16_t> & aTimedInvokeTimeoutMs)
{
AddRequestDataParameters addRequestDataParams(aTimedInvokeTimeoutMs);
return AddRequestData(aCommandPath, aData, addRequestDataParams);
}
/**
* @brief Returns the number of InvokeResponseMessages received.
*
* Responses to multiple requests might be split across several InvokeResponseMessages.
* This function helps track the total count. Primarily for test validation purposes.
*/
size_t GetInvokeResponseMessageCount();
#if CONFIG_BUILD_FOR_HOST_UNIT_TEST
/**
* Version of AddRequestData that allows sending a message that is
* guaranteed to fail due to requiring a timed invoke but not providing a
* timeout parameter. For use in tests only.
*/
template <typename CommandDataT>
CHIP_ERROR TestOnlyAddRequestDataNoTimedCheck(const CommandPathParams & aCommandPath, const CommandDataT & aData,
AddRequestDataParameters & aAddRequestDataParams)
{
DataModel::EncodableType<CommandDataT> encodable(aData);
return AddRequestData(aCommandPath, encodable, aAddRequestDataParams);
}
CHIP_ERROR TestOnlyFinishCommand(FinishCommandParameters & aFinishCommandParams)
{
if (mBatchCommandsEnabled)
{
VerifyOrReturnError(aFinishCommandParams.commandRef.HasValue(), CHIP_ERROR_INVALID_ARGUMENT);
}
return FinishCommandInternal(aFinishCommandParams);
}
/**
* Version of SendCommandRequest that sets the TimedRequest flag but does not send the TimedInvoke
* action. For use in tests only.
*/
CHIP_ERROR TestOnlyCommandSenderTimedRequestFlagWithNoTimedInvoke(const SessionHandle & session,
Optional<System::Clock::Timeout> timeout = NullOptional);
#endif // CONFIG_BUILD_FOR_HOST_UNIT_TEST
private:
CHIP_ERROR FinishCommandInternal(FinishCommandParameters & aFinishCommandParams);
public:
// Sends a queued up command request to the target encapsulated by the secureSession handle.
//
// Upon successful return from this call, all subsequent errors that occur during this interaction
// will be conveyed through the OnError callback above. In addition, upon completion of work regardless of
// whether it was successful or not, the OnDone callback will be invoked to indicate completion of work on this
// object and to indicate to the application that it can destroy and free this object.
//
// Applications can, however, destroy this object at any time after this call, except while handling
// an OnResponse or OnError callback, and it will safely clean-up.
//
// If this call returns failure, the callback's OnDone will never be called; the client is responsible
// for destroying this object on failure.
//
// Client can specify the maximum time to wait for response (in milliseconds) via timeout parameter.
// Default timeout value will be used otherwise.
//
CHIP_ERROR SendCommandRequest(const SessionHandle & session, Optional<System::Clock::Timeout> timeout = NullOptional);
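// Usage sketch (illustrative): a typical single-command client flow. The
// callback object, command path and request struct are assumed to be provided
// by the application.
//
//   auto sender = Platform::MakeUnique<CommandSender>(&myCallback, &exchangeMgr);
//   ReturnErrorOnFailure(sender->AddRequestData(path, request));
//   ReturnErrorOnFailure(sender->SendCommandRequest(sessionHandle));
//   sender.release(); // the object is destroyed by the application in OnDone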
// Sends a queued up group command request to the target encapsulated by the secureSession handle.
//
// If this function is successful, it will invoke the OnDone callback before returning to indicate
// to the application that it can destroy and free this object.
//
CHIP_ERROR SendGroupCommandRequest(const SessionHandle & session);
private:
friend class TestCommandInteraction;
enum class State : uint8_t
{
Idle, ///< Default state that the object starts out in, where no work has commenced
AddingCommand, ///< In the process of adding a command.
AddedCommand, ///< A command has been completely encoded and is awaiting transmission.
AwaitingTimedStatus, ///< Sent a Timed Request and waiting for response.
AwaitingResponse, ///< The command has been sent successfully, and we are awaiting invoke response.
ResponseReceived, ///< Received a response to our invoke request and are processing the response.
AwaitingDestruction, ///< The object has completed its work and is awaiting destruction by the application.
};
/**
* Class to help backup CommandSender's buffer containing InvokeRequestMessage when adding InvokeRequest
* in case there is a failure to add InvokeRequest. Intended usage is as follows:
* - Allocate RollbackInvokeRequest on the stack.
* - Attempt adding InvokeRequest into InvokeRequestMessage buffer.
* - If the modification is added successfully, call DisableAutomaticRollback() to prevent the destructor from
* rolling back the InvokeRequestMessage.
* - If there is an issue adding InvokeRequest, destructor will take care of rolling back
* InvokeRequestMessage to previously saved state.
*/
class RollbackInvokeRequest
{
public:
explicit RollbackInvokeRequest(CommandSender & aCommandSender);
~RollbackInvokeRequest();
/**
* Disables rolling back to previously saved state for InvokeRequestMessage.
*/
void DisableAutomaticRollback();
private:
CommandSender & mCommandSender;
TLV::TLVWriter mBackupWriter;
State mBackupState;
bool mRollbackInDestructor = false;
};
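// Usage sketch (this mirrors how AddRequestData in CommandSender.cpp uses the class):
//
//   RollbackInvokeRequest rollback(*this);
//   ReturnErrorOnFailure(PrepareCommand(aCommandPath, prepareCommandParams));
//   // ... encode the request payload ...
//   ReturnErrorOnFailure(FinishCommand(finishCommandParams));
//   rollback.DisableAutomaticRollback(); // success: keep the encoded InvokeRequest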
union CallbackHandle
{
CallbackHandle(Callback * apCallback) : legacyCallback(apCallback) {}
CallbackHandle(ExtendableCallback * apExtendableCallback) : extendableCallback(apExtendableCallback) {}
Callback * legacyCallback;
ExtendableCallback * extendableCallback;
};
void MoveToState(const State aTargetState);
const char * GetStateStr() const;
/*
* Allocates a packet buffer used for encoding an invoke request payload.
*
* This can be called multiple times safely, as it will only allocate the buffer once for the lifetime
* of this object.
*/
CHIP_ERROR AllocateBuffer();
// ExchangeDelegate interface implementation. Private so people won't
// accidentally call it on us when we're not being treated as an actual
// ExchangeDelegate.
CHIP_ERROR OnMessageReceived(Messaging::ExchangeContext * apExchangeContext, const PayloadHeader & aPayloadHeader,
System::PacketBufferHandle && aPayload) override;
void OnResponseTimeout(Messaging::ExchangeContext * apExchangeContext) override;
void FlushNoCommandResponse();
//
// Called internally to signal the completion of all work on this object, gracefully close the
// exchange (by calling into the base class) and finally, signal to the application that it's
// safe to release this object.
//
void Close();
/*
* This forcibly closes the exchange context if a valid one is pointed to. Such a situation does
* not arise during normal message processing flows, which all call Close() above. It can only
* arise from application-initiated destruction of the object while it is handling receiving/sending
* message payloads.
*/
void Abort();
CHIP_ERROR ProcessInvokeResponse(System::PacketBufferHandle && payload, bool & moreChunkedMessages);
CHIP_ERROR ProcessInvokeResponseIB(InvokeResponseIB::Parser & aInvokeResponse);
void SetTimedInvokeTimeoutMs(const Optional<uint16_t> & aTimedInvokeTimeoutMs);
// Send our queued-up Invoke Request message. Assumes the exchange is ready
// and mPendingInvokeData is populated.
CHIP_ERROR SendInvokeRequest();
CHIP_ERROR Finalize(System::PacketBufferHandle & commandPacket);
CHIP_ERROR SendCommandRequestInternal(const SessionHandle & session, Optional<System::Clock::Timeout> timeout);
void OnResponseCallback(const ResponseData & aResponseData)
{
// mCallbackHandle.extendableCallback and mCallbackHandle.legacyCallback are mutually exclusive.
if (mUseExtendableCallback && mCallbackHandle.extendableCallback)
{
mCallbackHandle.extendableCallback->OnResponse(this, aResponseData);
}
else if (mCallbackHandle.legacyCallback)
{
mCallbackHandle.legacyCallback->OnResponse(this, aResponseData.path, aResponseData.statusIB, aResponseData.data);
}
}
void OnErrorCallback(CHIP_ERROR aError)
{
// mCallbackHandle.extendableCallback and mCallbackHandle.legacyCallback are mutually exclusive.
if (mUseExtendableCallback && mCallbackHandle.extendableCallback)
{
ErrorData errorData = { aError };
mCallbackHandle.extendableCallback->OnError(this, errorData);
}
else if (mCallbackHandle.legacyCallback)
{
mCallbackHandle.legacyCallback->OnError(this, aError);
}
}
void OnDoneCallback()
{
// mCallbackHandle.extendableCallback and mCallbackHandle.legacyCallback are mutually exclusive.
if (mUseExtendableCallback && mCallbackHandle.extendableCallback)
{
mCallbackHandle.extendableCallback->OnDone(this);
}
else if (mCallbackHandle.legacyCallback)
{
mCallbackHandle.legacyCallback->OnDone(this);
}
}
Messaging::ExchangeHolder mExchangeCtx;
CallbackHandle mCallbackHandle;
Messaging::ExchangeManager * mpExchangeMgr = nullptr;
InvokeRequestMessage::Builder mInvokeRequestBuilder;
// TODO Maybe we should change PacketBufferTLVWriter so we can finalize it
// but have it hold on to the buffer, and get the buffer from it later.
// Then we could avoid this extra pointer-sized member.
System::PacketBufferHandle mPendingInvokeData;
// If mTimedInvokeTimeoutMs has a value, we are expected to do a timed
// invoke.
Optional<uint16_t> mTimedInvokeTimeoutMs;
TLV::TLVType mDataElementContainerType = TLV::kTLVType_NotSpecified;
chip::System::PacketBufferTLVWriter mCommandMessageWriter;
#if CHIP_CONFIG_COMMAND_SENDER_BUILTIN_SUPPORT_FOR_BATCHED_COMMANDS
PendingResponseTrackerImpl mNonTestPendingResponseTracker;
#endif // CHIP_CONFIG_COMMAND_SENDER_BUILTIN_SUPPORT_FOR_BATCHED_COMMANDS
PendingResponseTracker * mpPendingResponseTracker = nullptr;
uint16_t mInvokeResponseMessageCount = 0;
uint16_t mFinishedCommandCount = 0;
uint16_t mRemoteMaxPathsPerInvoke = 1;
State mState = State::Idle;
bool mSuppressResponse = false;
bool mTimedRequest = false;
bool mBufferAllocated = false;
bool mBatchCommandsEnabled = false;
bool mUseExtendableCallback = false;
bool mAllowLargePayload = false;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,101 @@
/*
* Copyright (c) 2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/ConcreteCommandPath.h>
#include <app/MessageDef/StatusIB.h>
#include <lib/core/TLV.h>
namespace chip {
namespace app {
class CommandSender;
/**
* @brief Legacy callbacks for CommandSender
*
* This class exists for legacy purposes. If you are developing a new callback implementation,
* please use `CommandSender::ExtendableCallback`.
*/
class CommandSenderLegacyCallback
{
public:
virtual ~CommandSenderLegacyCallback() = default;
/**
* OnResponse will be called when a successful response from server has been received and processed.
* Specifically:
* - When a status code is received and it is IM::Success, aData will be nullptr.
* - When a data response is received, aData will point to a valid TLVReader initialized to point at the struct container
* that contains the data payload (callee will still need to open and process the container).
*
* The CommandSender object MUST continue to exist after this call is completed. The application shall wait until it
* receives an OnDone call to destroy the object.
*
* @param[in] apCommandSender The command sender object that initiated the command transaction.
* @param[in] aPath The command path field in invoke command response.
* @param[in] aStatusIB It will always have a success status. If apData is null, it can be any success status,
* including possibly a cluster-specific one. If apData is not null, aStatusIB will always
* be a generic SUCCESS status with no cluster-specific information.
* @param[in] apData The command data, will be nullptr if the server returns a StatusIB.
*/
virtual void OnResponse(CommandSender * apCommandSender, const ConcreteCommandPath & aPath, const StatusIB & aStatusIB,
TLV::TLVReader * apData)
{}
/**
* OnError will be called when an error occurs *after* a successful call to SendCommandRequest(). The following
* errors will be delivered through this call in the aError field:
*
* - CHIP_ERROR_TIMEOUT: A response was not received within the expected response timeout.
* - CHIP_ERROR_*TLV*: A malformed, non-compliant response was received from the server.
* - CHIP_ERROR encapsulating a StatusIB: If we got a non-path-specific or path-specific
* status response from the server. In that case, constructing a
* StatusIB from the error can be used to extract the status.
* - Note: a CommandSender using `CommandSender::Callback` only supports sending
* a single InvokeRequest. As a result, only one path-specific error is expected
* to ever be sent to the OnError callback.
* - CHIP_ERROR*: All other cases.
*
* The CommandSender object MUST continue to exist after this call is completed. The application shall wait until it
* receives an OnDone call to destroy and free the object.
*
* @param[in] apCommandSender The command sender object that initiated the command transaction.
* @param[in] aError A system error code that conveys the overall error code.
*/
virtual void OnError(const CommandSender * apCommandSender, CHIP_ERROR aError) {}
/**
* OnDone will be called when CommandSender has finished all work and it is safe to destroy and free the
* allocated CommandSender object.
*
* This function will:
* - Always be called exactly *once* for a given CommandSender instance.
* - Be called even in error circumstances.
* - Only be called after a successful call to SendCommandRequest returns, if SendCommandRequest is used.
* - Always be called before a successful return from SendGroupCommandRequest, if SendGroupCommandRequest is used.
*
* This function must be implemented to destroy the CommandSender object.
*
* @param[in] apCommandSender The command sender object of the terminated invoke command transaction.
*/
virtual void OnDone(CommandSender * apCommandSender) = 0;
};
} // namespace app
} // namespace chip
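As a brief, illustrative sketch (not part of the imported file): an application-side implementation of this legacy interface might look like the following. The class name, logging, and use of chip::Platform::Delete are assumptions for the example, not SDK requirements beyond the OnDone contract described above.

#include <app/CommandSender.h>
#include <lib/support/CHIPMem.h>
#include <lib/support/logging/CHIPLogging.h>

// Hypothetical callback: logs the outcome and frees the sender only in OnDone,
// as the contract above requires.
class ExampleLegacyCallback : public chip::app::CommandSenderLegacyCallback
{
public:
    void OnResponse(chip::app::CommandSender * apCommandSender, const chip::app::ConcreteCommandPath & aPath,
                    const chip::app::StatusIB & aStatusIB, chip::TLV::TLVReader * apData) override
    {
        // apData is nullptr for status-only responses; otherwise it is positioned on the data struct.
        ChipLogProgress(DataManagement, "Invoke response on endpoint %u", static_cast<unsigned>(aPath.mEndpointId));
    }
    void OnError(const chip::app::CommandSender * apCommandSender, CHIP_ERROR aError) override
    {
        ChipLogError(DataManagement, "Invoke failed: %" CHIP_ERROR_FORMAT, aError.Format());
    }
    void OnDone(chip::app::CommandSender * apCommandSender) override
    {
        // Only here is it safe to release the sender.
        chip::Platform::Delete(apCommandSender);
    }
};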

View File

@@ -0,0 +1,170 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/ConcreteClusterPath.h>
#include <app/util/basic-types.h>
#include <lib/core/Optional.h>
namespace chip {
namespace app {
/**
* A representation of a concrete attribute path. This does not convey any list index specifiers.
*
* The expanded flag can be set to indicate that a concrete path was expanded from a wildcard
* or group path.
*/
struct ConcreteAttributePath : public ConcreteClusterPath
{
ConcreteAttributePath()
{
// Note: mExpanded is in the superclass, so we can't use a field
// initializer.
mExpanded = false;
}
ConcreteAttributePath(EndpointId aEndpointId, ClusterId aClusterId, AttributeId aAttributeId) :
ConcreteClusterPath(aEndpointId, aClusterId), mAttributeId(aAttributeId)
{
// Note: mExpanded is in the superclass, so we can't use a field
// initializer.
mExpanded = false;
}
bool IsValid() const { return ConcreteClusterPath::HasValidIds() && IsValidAttributeId(mAttributeId); }
bool operator==(const ConcreteAttributePath & aOther) const
{
return ConcreteClusterPath::operator==(aOther) && (mAttributeId == aOther.mAttributeId);
}
bool operator!=(const ConcreteAttributePath & aOther) const { return !(*this == aOther); }
bool operator<(const ConcreteAttributePath & path) const
{
return (mEndpointId < path.mEndpointId) || ((mEndpointId == path.mEndpointId) && (mClusterId < path.mClusterId)) ||
((mEndpointId == path.mEndpointId) && (mClusterId == path.mClusterId) && (mAttributeId < path.mAttributeId));
}
AttributeId mAttributeId = 0;
};
/**
* A representation of a concrete path as it appears in a Read or Subscribe
* request after path expansion. This contains support for expressing an
* optional list index.
*/
struct ConcreteReadAttributePath : public ConcreteAttributePath
{
ConcreteReadAttributePath() {}
ConcreteReadAttributePath(const ConcreteAttributePath & path) : ConcreteAttributePath(path) {}
ConcreteReadAttributePath(EndpointId aEndpointId, ClusterId aClusterId, AttributeId aAttributeId) :
ConcreteAttributePath(aEndpointId, aClusterId, aAttributeId)
{}
ConcreteReadAttributePath(EndpointId aEndpointId, ClusterId aClusterId, AttributeId aAttributeId, uint16_t aListIndex) :
ConcreteAttributePath(aEndpointId, aClusterId, aAttributeId)
{
mListIndex.SetValue(aListIndex);
}
bool operator==(const ConcreteReadAttributePath & aOther) const = delete;
bool operator!=(const ConcreteReadAttributePath & aOther) const = delete;
bool operator<(const ConcreteReadAttributePath & aOther) const = delete;
Optional<uint16_t> mListIndex;
};
/**
* A representation of a concrete path as it appears in a Report or Write
* request after path expansion. This contains support for expressing list and list item-specific operations
* like replace, update, delete and append.
*/
struct ConcreteDataAttributePath : public ConcreteAttributePath
{
enum class ListOperation : uint8_t
{
NotList, // Path points to an attribute that isn't a list.
ReplaceAll, // Path points to an attribute that is a list, indicating that the contents of the list should be replaced in
// its entirety.
ReplaceItem, // Path points to a specific item in a list, indicating that that item should be replaced in its entirety.
DeleteItem, // Path points to a specific item in a list, indicating that that item should be deleted from the list.
AppendItem // Path points to an attribute that is a list, indicating that an item should be appended into the list.
};
ConcreteDataAttributePath() {}
ConcreteDataAttributePath(const ConcreteAttributePath & path) : ConcreteAttributePath(path) {}
ConcreteDataAttributePath(EndpointId aEndpointId, ClusterId aClusterId, AttributeId aAttributeId) :
ConcreteAttributePath(aEndpointId, aClusterId, aAttributeId)
{}
ConcreteDataAttributePath(EndpointId aEndpointId, ClusterId aClusterId, AttributeId aAttributeId,
const Optional<DataVersion> & aDataVersion) :
ConcreteAttributePath(aEndpointId, aClusterId, aAttributeId),
mDataVersion(aDataVersion)
{}
ConcreteDataAttributePath(EndpointId aEndpointId, ClusterId aClusterId, AttributeId aAttributeId, ListOperation aListOp,
uint16_t aListIndex) :
ConcreteAttributePath(aEndpointId, aClusterId, aAttributeId)
{
mListOp = aListOp;
mListIndex = aListIndex;
}
bool IsListOperation() const { return mListOp != ListOperation::NotList; }
bool IsListItemOperation() const { return ((mListOp != ListOperation::NotList) && (mListOp != ListOperation::ReplaceAll)); }
void LogPath() const
{
ChipLogProgress(DataManagement, "Concrete Attribute Path: (%d, " ChipLogFormatMEI ", " ChipLogFormatMEI ") ", mEndpointId,
ChipLogValueMEI(mClusterId), ChipLogValueMEI(mAttributeId));
}
bool MatchesConcreteAttributePath(const ConcreteAttributePath & aOther) const
{
return ConcreteAttributePath::operator==(aOther);
}
bool operator==(const ConcreteDataAttributePath & aOther) const
{
return ConcreteAttributePath::operator==(aOther) && (mListIndex == aOther.mListIndex) && (mListOp == aOther.mListOp) &&
(mDataVersion == aOther.mDataVersion);
}
bool operator!=(const ConcreteDataAttributePath & aOther) const { return !(*this == aOther); }
bool operator<(const ConcreteDataAttributePath & aOther) const = delete;
//
// This index is only valid if `mListOp` is set to a list item operation, i.e
// ReplaceItem, DeleteItem or AppendItem. Otherwise, it is to be ignored.
//
uint16_t mListIndex = 0;
ListOperation mListOp = ListOperation::NotList;
Optional<DataVersion> mDataVersion = NullOptional;
};
} // namespace app
} // namespace chip
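A minimal illustrative sketch (not part of the imported file): because the struct defines operator< and operator==, expanded concrete paths can be collected directly in ordered standard containers. The helper function name is hypothetical.

#include <set>
#include <app/ConcreteAttributePath.h>

// Hypothetical helper: de-duplicates and orders paths produced by wildcard expansion.
inline void CollectExpandedPaths()
{
    std::set<chip::app::ConcreteAttributePath> paths;
    paths.insert(chip::app::ConcreteAttributePath(/* endpoint */ 1, /* cluster */ 0x0006, /* attribute */ 0x0000));
    paths.insert(chip::app::ConcreteAttributePath(1, 0x0006, 0x0000)); // duplicate; the set keeps one copy
    // paths.size() == 1 at this point.
}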

View File

@@ -0,0 +1,60 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/util/basic-types.h>
namespace chip {
namespace app {
/**
* A representation of a concrete cluster path. This identifies a specific
* cluster instance.
*/
struct ConcreteClusterPath
{
ConcreteClusterPath(EndpointId aEndpointId, ClusterId aClusterId) : mEndpointId(aEndpointId), mClusterId(aClusterId) {}
ConcreteClusterPath() {}
ConcreteClusterPath(const ConcreteClusterPath & aOther) = default;
ConcreteClusterPath & operator=(const ConcreteClusterPath & aOther) = default;
bool IsValidConcreteClusterPath() const { return !(mEndpointId == kInvalidEndpointId || mClusterId == kInvalidClusterId); }
bool HasValidIds() const { return IsValidEndpointId(mEndpointId) && IsValidClusterId(mClusterId); }
bool operator==(const ConcreteClusterPath & aOther) const
{
return mEndpointId == aOther.mEndpointId && mClusterId == aOther.mClusterId;
}
bool operator!=(const ConcreteClusterPath & aOther) const { return !(*this == aOther); }
EndpointId mEndpointId = 0;
// Note: not all subclasses of ConcreteClusterPath need mExpanded, but due
// to alignment requirements it's "free" in the sense of not needing more
// memory to put it here. But we don't initialize it, because that
// increases codesize for the non-consumers.
bool mExpanded; // NOTE: in between larger members, NOT initialized (see above)
ClusterId mClusterId = 0;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,48 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/ConcreteClusterPath.h>
#include <app/util/basic-types.h>
namespace chip {
namespace app {
/**
* A representation of a concrete invoke path.
*/
struct ConcreteCommandPath : public ConcreteClusterPath
{
ConcreteCommandPath(EndpointId aEndpointId, ClusterId aClusterId, CommandId aCommandId) :
ConcreteClusterPath(aEndpointId, aClusterId), mCommandId(aCommandId)
{}
ConcreteCommandPath() : ConcreteClusterPath(kInvalidEndpointId, kInvalidClusterId), mCommandId(kInvalidCommandId) {}
bool operator==(const ConcreteCommandPath & aOther) const
{
return ConcreteClusterPath::operator==(aOther) && (mCommandId == aOther.mCommandId);
}
bool operator!=(const ConcreteCommandPath & aOther) const { return !(*this == aOther); }
CommandId mCommandId = 0;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,57 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/ConcreteClusterPath.h>
#include <app/util/basic-types.h>
namespace chip {
namespace app {
/**
* A representation of a concrete event path.
*/
struct ConcreteEventPath : public ConcreteClusterPath
{
ConcreteEventPath(EndpointId aEndpointId, ClusterId aClusterId, EventId aEventId) :
ConcreteClusterPath(aEndpointId, aClusterId), mEventId(aEventId)
{}
ConcreteEventPath() {}
ConcreteEventPath(const ConcreteEventPath & aOther) = default;
ConcreteEventPath & operator=(const ConcreteEventPath & aOther) = default;
bool operator==(const ConcreteEventPath & aOther) const
{
return ConcreteClusterPath::operator==(aOther) && (mEventId == aOther.mEventId);
}
bool operator!=(const ConcreteEventPath & aOther) const { return !(*this == aOther); }
bool operator<(const ConcreteEventPath & path) const
{
return (mEndpointId < path.mEndpointId) || ((mEndpointId == path.mEndpointId) && (mClusterId < path.mClusterId)) ||
((mEndpointId == path.mEndpointId) && (mClusterId == path.mClusterId) && (mEventId < path.mEventId));
}
EventId mEventId = 0;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,49 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/util/basic-types.h>
#include <lib/core/Optional.h>
namespace chip {
namespace app {
struct DataVersionFilter
{
DataVersionFilter(EndpointId aEndpointId, ClusterId aClusterId, DataVersion aDataVersion) :
mClusterId(aClusterId), mDataVersion(aDataVersion), mEndpointId(aEndpointId)
{}
DataVersionFilter() {}
bool IsValidDataVersionFilter() const
{
return (mEndpointId != kInvalidEndpointId) && (mClusterId != kInvalidClusterId) && (mDataVersion.HasValue());
}
bool operator==(const DataVersionFilter & aOther) const
{
return mEndpointId == aOther.mEndpointId && mClusterId == aOther.mClusterId && mDataVersion == aOther.mDataVersion;
}
ClusterId mClusterId = kInvalidClusterId; // uint32
Optional<DataVersion> mDataVersion; // uint32
EndpointId mEndpointId = kInvalidEndpointId; // uint16
};
} // namespace app
} // namespace chip
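A short illustrative sketch (not part of the imported file) showing why all three fields matter for validity; the endpoint, cluster ID, and data version literals are placeholders.

#include <app/DataVersionFilter.h>

inline void DataVersionFilterExample()
{
    chip::app::DataVersionFilter filter(/* endpoint */ 1, /* cluster */ 0x0006, /* dataVersion */ 42);
    bool valid = filter.IsValidDataVersionFilter(); // true: endpoint, cluster, and version are all set
    chip::app::DataVersionFilter empty;
    bool invalid = empty.IsValidDataVersionFilter(); // false: no data version present
    (void) valid;
    (void) invalid;
}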

View File

@@ -0,0 +1,151 @@
/*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/DefaultAttributePersistenceProvider.h>
#include <app/util/ember-strings.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/DefaultStorageKeyAllocator.h>
#include <lib/support/SafeInt.h>
namespace chip {
namespace app {
CHIP_ERROR DefaultAttributePersistenceProvider::InternalWriteValue(const StorageKeyName & aKey, const ByteSpan & aValue)
{
VerifyOrReturnError(mStorage != nullptr, CHIP_ERROR_INCORRECT_STATE);
// TODO: we may want to have a small cache for values that change a lot, so
// we only write them once a bunch of changes happen or on timer or
// shutdown.
if (!CanCastTo<uint16_t>(aValue.size()))
{
return CHIP_ERROR_BUFFER_TOO_SMALL;
}
return mStorage->SyncSetKeyValue(aKey.KeyName(), aValue.data(), static_cast<uint16_t>(aValue.size()));
}
CHIP_ERROR DefaultAttributePersistenceProvider::InternalReadValue(const StorageKeyName & aKey, MutableByteSpan & aValue)
{
VerifyOrReturnError(mStorage != nullptr, CHIP_ERROR_INCORRECT_STATE);
uint16_t size = static_cast<uint16_t>(min(aValue.size(), static_cast<size_t>(UINT16_MAX)));
ReturnErrorOnFailure(mStorage->SyncGetKeyValue(aKey.KeyName(), aValue.data(), size));
aValue.reduce_size(size);
return CHIP_NO_ERROR;
}
CHIP_ERROR DefaultAttributePersistenceProvider::InternalReadValue(const StorageKeyName & aKey, EmberAfAttributeType aType,
size_t aExpectedSize, MutableByteSpan & aValue)
{
ReturnErrorOnFailure(InternalReadValue(aKey, aValue));
size_t size = aValue.size();
if (emberAfIsStringAttributeType(aType))
{
// Ensure that we've read enough bytes that we are not ending up with
// un-initialized memory. Should have read length + 1 (for the length
// byte).
VerifyOrReturnError(size >= 1 && size - 1 >= emberAfStringLength(aValue.data()), CHIP_ERROR_INCORRECT_STATE);
}
else if (emberAfIsLongStringAttributeType(aType))
{
// Ensure that we've read enough bytes that we are not ending up with
// un-initialized memory. Should have read length + 2 (for the length
// bytes).
VerifyOrReturnError(size >= 2 && size - 2 >= emberAfLongStringLength(aValue.data()), CHIP_ERROR_INCORRECT_STATE);
}
else
{
// Ensure we got the expected number of bytes for all other types.
VerifyOrReturnError(size == aExpectedSize, CHIP_ERROR_INVALID_ARGUMENT);
}
return CHIP_NO_ERROR;
}
CHIP_ERROR DefaultAttributePersistenceProvider::WriteValue(const ConcreteAttributePath & aPath, const ByteSpan & aValue)
{
return InternalWriteValue(DefaultStorageKeyAllocator::AttributeValue(aPath.mEndpointId, aPath.mClusterId, aPath.mAttributeId),
aValue);
}
CHIP_ERROR DefaultAttributePersistenceProvider::ReadValue(const ConcreteAttributePath & aPath,
const EmberAfAttributeMetadata * aMetadata, MutableByteSpan & aValue)
{
return InternalReadValue(DefaultStorageKeyAllocator::AttributeValue(aPath.mEndpointId, aPath.mClusterId, aPath.mAttributeId),
aMetadata->attributeType, aMetadata->size, aValue);
}
CHIP_ERROR DefaultAttributePersistenceProvider::SafeWriteValue(const ConcreteAttributePath & aPath, const ByteSpan & aValue)
{
return InternalWriteValue(
DefaultStorageKeyAllocator::SafeAttributeValue(aPath.mEndpointId, aPath.mClusterId, aPath.mAttributeId), aValue);
}
CHIP_ERROR DefaultAttributePersistenceProvider::SafeReadValue(const ConcreteAttributePath & aPath, MutableByteSpan & aValue)
{
return InternalReadValue(
DefaultStorageKeyAllocator::SafeAttributeValue(aPath.mEndpointId, aPath.mClusterId, aPath.mAttributeId), aValue);
}
namespace {
AttributePersistenceProvider * gAttributeSaver = nullptr;
} // anonymous namespace
/**
* Gets the global attribute saver.
*
* Note: When storing cluster attributes that are managed via AttributeAccessInterface, it is recommended to
* use SafeAttributePersistenceProvider. See AttributePersistenceProvider and SafeAttributePersistenceProvider
* class documentation for more information.
*/
AttributePersistenceProvider * GetAttributePersistenceProvider()
{
return gAttributeSaver;
}
void SetAttributePersistenceProvider(AttributePersistenceProvider * aProvider)
{
if (aProvider != nullptr)
{
gAttributeSaver = aProvider;
}
}
namespace {
SafeAttributePersistenceProvider * gSafeAttributeSaver = nullptr;
} // anonymous namespace
/**
* Gets the global attribute safe saver.
*/
SafeAttributePersistenceProvider * GetSafeAttributePersistenceProvider()
{
return gSafeAttributeSaver;
}
void SetSafeAttributePersistenceProvider(SafeAttributePersistenceProvider * aProvider)
{
if (aProvider != nullptr)
{
gSafeAttributeSaver = aProvider;
}
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,72 @@
/*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/AttributePersistenceProvider.h>
#include <app/SafeAttributePersistenceProvider.h>
#include <lib/core/CHIPPersistentStorageDelegate.h>
#include <lib/support/DefaultStorageKeyAllocator.h>
namespace chip {
namespace app {
/**
* Default implementation of AttributePersistenceProvider. This uses
* PersistentStorageDelegate to store the attribute values.
*
* NOTE: SetAttributePersistenceProvider must still be called with an instance
* of this class, since it can't be constructed automatically without knowing
* what PersistentStorageDelegate is to be used.
*/
class DefaultAttributePersistenceProvider : public AttributePersistenceProvider, public SafeAttributePersistenceProvider
{
public:
DefaultAttributePersistenceProvider() {}
// Passed-in storage must outlive this object.
CHIP_ERROR Init(PersistentStorageDelegate * storage)
{
if (storage == nullptr)
{
return CHIP_ERROR_INVALID_ARGUMENT;
}
mStorage = storage;
return CHIP_NO_ERROR;
}
void Shutdown() {}
// AttributePersistenceProvider implementation.
CHIP_ERROR WriteValue(const ConcreteAttributePath & aPath, const ByteSpan & aValue) override;
CHIP_ERROR ReadValue(const ConcreteAttributePath & aPath, const EmberAfAttributeMetadata * aMetadata,
MutableByteSpan & aValue) override;
// SafeAttributePersistenceProvider implementation.
CHIP_ERROR SafeWriteValue(const ConcreteAttributePath & aPath, const ByteSpan & aValue) override;
CHIP_ERROR SafeReadValue(const ConcreteAttributePath & aPath, MutableByteSpan & aValue) override;
protected:
PersistentStorageDelegate * mStorage;
private:
CHIP_ERROR InternalWriteValue(const StorageKeyName & aKey, const ByteSpan & aValue);
CHIP_ERROR InternalReadValue(const StorageKeyName & aKey, MutableByteSpan & aValue);
CHIP_ERROR InternalReadValue(const StorageKeyName & aKey, EmberAfAttributeType aType, size_t aExpectedSize,
MutableByteSpan & aValue);
};
} // namespace app
} // namespace chip
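A brief illustrative sketch (not part of the imported file) of the wiring the NOTE above refers to; the global object and init function names are assumptions.

#include <app/DefaultAttributePersistenceProvider.h>
#include <lib/support/CodeUtils.h>

// Hypothetical application-side setup: one provider instance serves both the
// regular and the "safe" persistence entry points.
chip::app::DefaultAttributePersistenceProvider gAttributeStore;

CHIP_ERROR InitAttributeStorage(chip::PersistentStorageDelegate & storage)
{
    ReturnErrorOnFailure(gAttributeStore.Init(&storage));
    chip::app::SetAttributePersistenceProvider(&gAttributeStore);
    chip::app::SetSafeAttributePersistenceProvider(&gAttributeStore);
    return CHIP_NO_ERROR;
}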

View File

@@ -0,0 +1,98 @@
/*
* Copyright (c) 2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <app/DeferredAttributePersistenceProvider.h>
#include <platform/CHIPDeviceLayer.h>
namespace chip {
namespace app {
CHIP_ERROR DeferredAttribute::PrepareWrite(System::Clock::Timestamp flushTime, const ByteSpan & value)
{
mFlushTime = flushTime;
if (mValue.AllocatedSize() != value.size())
{
mValue.Alloc(value.size());
ReturnErrorCodeIf(!mValue, CHIP_ERROR_NO_MEMORY);
}
memcpy(mValue.Get(), value.data(), value.size());
return CHIP_NO_ERROR;
}
void DeferredAttribute::Flush(AttributePersistenceProvider & persister)
{
VerifyOrReturn(IsArmed());
persister.WriteValue(mPath, ByteSpan(mValue.Get(), mValue.AllocatedSize()));
mValue.Free();
}
CHIP_ERROR DeferredAttributePersistenceProvider::WriteValue(const ConcreteAttributePath & aPath, const ByteSpan & aValue)
{
for (DeferredAttribute & da : mDeferredAttributes)
{
if (da.Matches(aPath))
{
ReturnErrorOnFailure(da.PrepareWrite(System::SystemClock().GetMonotonicTimestamp() + mWriteDelay, aValue));
FlushAndScheduleNext();
return CHIP_NO_ERROR;
}
}
return mPersister.WriteValue(aPath, aValue);
}
CHIP_ERROR DeferredAttributePersistenceProvider::ReadValue(const ConcreteAttributePath & aPath,
const EmberAfAttributeMetadata * aMetadata, MutableByteSpan & aValue)
{
return mPersister.ReadValue(aPath, aMetadata, aValue);
}
void DeferredAttributePersistenceProvider::FlushAndScheduleNext()
{
const System::Clock::Timestamp now = System::SystemClock().GetMonotonicTimestamp();
System::Clock::Timestamp nextFlushTime = System::Clock::Timestamp::max();
for (DeferredAttribute & da : mDeferredAttributes)
{
if (!da.IsArmed())
{
continue;
}
if (da.GetFlushTime() <= now)
{
da.Flush(mPersister);
}
else
{
nextFlushTime = chip::min(nextFlushTime, da.GetFlushTime());
}
}
if (nextFlushTime != System::Clock::Timestamp::max())
{
DeviceLayer::SystemLayer().StartTimer(
nextFlushTime - now,
[](System::Layer *, void * me) { static_cast<DeferredAttributePersistenceProvider *>(me)->FlushAndScheduleNext(); },
this);
}
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,83 @@
/*
* Copyright (c) 2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/AttributePersistenceProvider.h>
#include <lib/support/ScopedBuffer.h>
#include <lib/support/Span.h>
#include <system/SystemClock.h>
namespace chip {
namespace app {
class DeferredAttribute
{
public:
explicit DeferredAttribute(const ConcreteAttributePath & path) : mPath(path) {}
bool Matches(const ConcreteAttributePath & path) const { return mPath == path; }
bool IsArmed() const { return static_cast<bool>(mValue); }
System::Clock::Timestamp GetFlushTime() const { return mFlushTime; }
CHIP_ERROR PrepareWrite(System::Clock::Timestamp flushTime, const ByteSpan & value);
void Flush(AttributePersistenceProvider & persister);
private:
const ConcreteAttributePath mPath;
System::Clock::Timestamp mFlushTime;
Platform::ScopedMemoryBufferWithSize<uint8_t> mValue;
};
/**
* Decorator class for the AttributePersistenceProvider implementation that
* defers writes of selected attributes.
*
* This class is useful to increase the flash lifetime by reducing the number
* of writes of fast-changing attributes, such as CurrentLevel attribute of the
* LevelControl cluster.
*/
class DeferredAttributePersistenceProvider : public AttributePersistenceProvider
{
public:
DeferredAttributePersistenceProvider(AttributePersistenceProvider & persister,
const Span<DeferredAttribute> & deferredAttributes,
System::Clock::Milliseconds32 writeDelay) :
mPersister(persister),
mDeferredAttributes(deferredAttributes), mWriteDelay(writeDelay)
{}
/*
* If the written attribute is one of the deferred attributes specified in the constructor,
* postpone the write operation by the configured delay. If this attribute changes within the
* delay period, further postpone the operation so that the actual write happens once the
* attribute has remained constant for the write delay period.
*
* For other attributes, immediately pass the write operation to the decorated persister.
*/
CHIP_ERROR WriteValue(const ConcreteAttributePath & aPath, const ByteSpan & aValue) override;
CHIP_ERROR ReadValue(const ConcreteAttributePath & aPath, const EmberAfAttributeMetadata * aMetadata,
MutableByteSpan & aValue) override;
private:
void FlushAndScheduleNext();
AttributePersistenceProvider & mPersister;
const Span<DeferredAttribute> mDeferredAttributes;
const System::Clock::Milliseconds32 mWriteDelay;
};
} // namespace app
} // namespace chip
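An illustrative sketch (not part of the imported file) of how the decorator might be assembled; the choice of the LevelControl CurrentLevel attribute (cluster 0x0008, attribute 0x0000) on endpoint 1 and the three-second delay are example values only.

#include <app/DeferredAttributePersistenceProvider.h>
#include <app/DefaultAttributePersistenceProvider.h>

// Hypothetical wiring: CurrentLevel writes are coalesced for 3 s; everything
// else goes straight to the decorated default persister.
chip::app::DefaultAttributePersistenceProvider gDirectPersister;
chip::app::DeferredAttribute gDeferredAttributes[] = {
    chip::app::DeferredAttribute(chip::app::ConcreteAttributePath(/* endpoint */ 1, /* cluster */ 0x0008, /* attribute */ 0x0000)),
};
chip::app::DeferredAttributePersistenceProvider gDeferredPersister(gDirectPersister,
                                                                   chip::Span<chip::app::DeferredAttribute>(gDeferredAttributes),
                                                                   chip::System::Clock::Milliseconds32(3000));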

View File

@@ -0,0 +1,59 @@
/*
*
* Copyright (c) 2020-2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* This file contains implementation of Device class. The objects of this
* class will be used by Controller applications to interact with CHIP
* devices. The class provides mechanism to construct, send and receive
* messages to and from the corresponding CHIP devices.
*/
#include <app/DeviceProxy.h>
#include <app/CommandSender.h>
#include <app/ReadPrepareParams.h>
#include <lib/core/CHIPCore.h>
#include <lib/core/CHIPEncoding.h>
#include <lib/dnssd/Resolver.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/logging/CHIPLogging.h>
using namespace chip::Callback;
namespace chip {
CHIP_ERROR DeviceProxy::SendCommands(app::CommandSender * commandObj, Optional<System::Clock::Timeout> timeout)
{
VerifyOrReturnLogError(IsSecureConnected(), CHIP_ERROR_INCORRECT_STATE);
VerifyOrReturnError(commandObj != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
return commandObj->SendCommandRequest(GetSecureSession().Value(), timeout);
}
CHIP_ERROR DeviceProxy::GetAttestationChallenge(ByteSpan & attestationChallenge)
{
Optional<SessionHandle> secureSessionHandle;
secureSessionHandle = GetSecureSession();
VerifyOrReturnError(secureSessionHandle.HasValue(), CHIP_ERROR_INCORRECT_STATE);
attestationChallenge = secureSessionHandle.Value()->AsSecureSession()->GetCryptoContext().GetAttestationChallenge();
return CHIP_NO_ERROR;
}
} // namespace chip

View File

@@ -0,0 +1,81 @@
/*
*
* Copyright (c) 2020-2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* This file contains definitions for DeviceProxy base class. The objects of this
* class will be used by applications to interact with peer CHIP devices.
* The class provides mechanism to construct, send and receive messages to and
* from the corresponding CHIP devices.
*/
#pragma once
#include <app/CommandSender.h>
#include <lib/core/CHIPCallback.h>
#include <lib/core/CHIPCore.h>
#include <lib/support/DLLUtil.h>
#include <system/SystemClock.h>
namespace chip {
class DLL_EXPORT DeviceProxy
{
public:
virtual ~DeviceProxy() {}
DeviceProxy() {}
/**
* Mark any open session with the device as expired.
*/
virtual void Disconnect() = 0;
virtual NodeId GetDeviceId() const = 0;
virtual CHIP_ERROR SendCommands(app::CommandSender * commandObj, chip::Optional<System::Clock::Timeout> timeout = NullOptional);
virtual Messaging::ExchangeManager * GetExchangeManager() const = 0;
virtual chip::Optional<SessionHandle> GetSecureSession() const = 0;
virtual CHIP_ERROR SetPeerId(ByteSpan rcac, ByteSpan noc) { return CHIP_ERROR_NOT_IMPLEMENTED; }
/**
* Facilities for keeping track of the latest point we can expect the
* fail-safe to last through. These timestamp values use the monotonic clock.
*/
void SetFailSafeExpirationTimestamp(System::Clock::Timestamp timestamp) { mFailSafeExpirationTimestamp = timestamp; }
System::Clock::Timestamp GetFailSafeExpirationTimestamp() const { return mFailSafeExpirationTimestamp; }
/**
* @brief
* This function returns the attestation challenge for the secure session.
*
* @param[out] attestationChallenge The output for the attestationChallenge
*
* @return CHIP_ERROR CHIP_NO_ERROR on success, or CHIP_ERROR_INCORRECT_STATE if no secure session is active
*/
virtual CHIP_ERROR GetAttestationChallenge(ByteSpan & attestationChallenge);
protected:
virtual bool IsSecureConnected() const = 0;
System::Clock::Timestamp mFailSafeExpirationTimestamp = System::Clock::kZero;
};
} // namespace chip
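A small illustrative sketch (not part of the imported file): reading the attestation challenge through the base-class API once a secure session exists; the function name is hypothetical.

#include <app/DeviceProxy.h>
#include <lib/support/Span.h>
#include <lib/support/logging/CHIPLogging.h>

inline void LogAttestationChallenge(chip::DeviceProxy & device)
{
    chip::ByteSpan challenge;
    if (device.GetAttestationChallenge(challenge) == CHIP_NO_ERROR)
    {
        ChipLogProgress(Controller, "Attestation challenge length: %u", static_cast<unsigned>(challenge.size()));
    }
}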

View File

@@ -0,0 +1,42 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "EventLoggingTypes.h"
#include <app/ConcreteEventPath.h>
#include <app/util/basic-types.h>
namespace chip {
namespace app {
struct EventHeader
{
ConcreteEventPath mPath;
EventNumber mEventNumber = 0;
PriorityLevel mPriorityLevel = PriorityLevel::Invalid;
Timestamp mTimestamp;
void LogPath() const
{
ChipLogProgress(DataManagement, "Concrete Event Path: (%d, " ChipLogFormatMEI ", " ChipLogFormatMEI ") ", mPath.mEndpointId,
ChipLogValueMEI(mPath.mClusterId), ChipLogValueMEI(mPath.mEventId));
}
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,106 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/ConcreteEventPath.h>
#include <app/EventLoggingDelegate.h>
#include <app/EventManagement.h>
#include <app/data-model/Encode.h>
#include <app/data-model/FabricScoped.h>
#include <app/data-model/List.h> // So we can encode lists
namespace chip {
namespace app {
template <typename T>
class EventLogger : public EventLoggingDelegate
{
public:
EventLogger(const T & aEventData) : mEventData(aEventData){};
CHIP_ERROR WriteEvent(chip::TLV::TLVWriter & aWriter) final override
{
return DataModel::Encode(aWriter, TLV::ContextTag(EventDataIB::Tag::kData), mEventData);
}
private:
const T & mEventData;
};
/**
* @brief
* Log an event via a EventLoggingDelegate, with options.
*
* The event logging subsystem writes the event metadata and then calls the `apDelegate`
* with a TLV::TLVWriter reference so that the user code can emit
* the event data directly into the event log. This form of event
* logging minimizes memory consumption, as event data is serialized
* directly into the target buffer. The event data MUST contain
* context tags to be interpreted within the schema identified by
* `ClusterID` and `EventId`.
*
* The consumer has to either lock the Matter stack lock or queue the event to
* the Matter event queue when using LogEvent. This function is not safe to call
* outside of the main Matter processing context.
*
* LogEvent has two variants: one for fabric-scoped events and one for non-fabric-scoped events.
* @param[in] aEventData The event cluster object
* @param[in] aEndpoint The current cluster's Endpoint Id
* @param[out] aEventNumber The event number if the event was written to the
* log, 0 otherwise. The event number is expected to increase monotonically.
*
* @return CHIP_ERROR CHIP Error Code
*/
template <typename T, std::enable_if_t<DataModel::IsFabricScoped<T>::value, bool> = true>
CHIP_ERROR LogEvent(const T & aEventData, EndpointId aEndpoint, EventNumber & aEventNumber)
{
EventLogger<T> eventData(aEventData);
ConcreteEventPath path(aEndpoint, aEventData.GetClusterId(), aEventData.GetEventId());
EventManagement & logMgmt = chip::app::EventManagement::GetInstance();
EventOptions eventOptions;
eventOptions.mPath = path;
eventOptions.mPriority = aEventData.GetPriorityLevel();
eventOptions.mFabricIndex = aEventData.GetFabricIndex();
// this skips logging the event if it's fabric-scoped but no fabric association exists yet.
VerifyOrReturnError(eventOptions.mFabricIndex != kUndefinedFabricIndex, CHIP_ERROR_INVALID_FABRIC_INDEX);
//
// Unlike attributes which have a different 'EncodeForRead' for fabric-scoped structs,
// fabric-sensitive events don't require that since the actual omission of the event in its entirety
// happens within the event management framework itself at the time of access.
//
// The 'mFabricIndex' field in the event options above is encoded out-of-band alongside the event payload
// and used to match against the accessing fabric.
//
return logMgmt.LogEvent(&eventData, eventOptions, aEventNumber);
}
template <typename T, std::enable_if_t<!DataModel::IsFabricScoped<T>::value, bool> = true>
CHIP_ERROR LogEvent(const T & aEventData, EndpointId aEndpoint, EventNumber & aEventNumber)
{
EventLogger<T> eventData(aEventData);
ConcreteEventPath path(aEndpoint, aEventData.GetClusterId(), aEventData.GetEventId());
EventManagement & logMgmt = chip::app::EventManagement::GetInstance();
EventOptions eventOptions;
eventOptions.mPath = path;
eventOptions.mPriority = aEventData.GetPriorityLevel();
return logMgmt.LogEvent(&eventData, eventOptions, aEventNumber);
}
} // namespace app
} // namespace chip
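An illustrative sketch (not part of the imported file) of calling the non-fabric-scoped LogEvent overload. Real applications use generated cluster event types; the hand-written ExampleEvent below is a stand-in that only shows the members LogEvent relies on (GetClusterId, GetEventId, GetPriorityLevel, and an Encode method), and all IDs and values are placeholders.

#include <app/EventLogging.h>
#include <lib/support/CodeUtils.h>

struct ExampleEvent
{
    static constexpr chip::ClusterId GetClusterId() { return 0xFFF1FC01; }
    static constexpr chip::EventId GetEventId() { return 0x0001; }
    static constexpr chip::app::PriorityLevel GetPriorityLevel() { return chip::app::PriorityLevel::Info; }

    uint32_t counter = 0;

    // DataModel::Encode dispatches to this member for class types.
    CHIP_ERROR Encode(chip::TLV::TLVWriter & aWriter, chip::TLV::Tag aTag) const
    {
        chip::TLV::TLVType outer;
        ReturnErrorOnFailure(aWriter.StartContainer(aTag, chip::TLV::kTLVType_Structure, outer));
        ReturnErrorOnFailure(aWriter.Put(chip::TLV::ContextTag(static_cast<uint8_t>(1)), counter));
        return aWriter.EndContainer(outer);
    }
};

inline void EmitExampleEvent()
{
    chip::EventNumber eventNumber = 0;
    CHIP_ERROR err = chip::app::LogEvent(ExampleEvent{}, /* endpoint */ 1, eventNumber);
    (void) err; // on success, eventNumber holds the assigned event number
}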

View File

@@ -0,0 +1,69 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* This file defines the classes corresponding to the CHIP Interaction Model Event Generator Delegate.
*
*/
#pragma once
#include <lib/core/TLV.h>
namespace chip {
namespace app {
/**
* An EventLoggingDelegate is used to fill event log data with cluster-specific information.
*
* Allows application to append any type of TLV data as part of an event log entry. Events
* have a standard header applicable to all events and this class provides the
* ability to add additional data past such standard header.
*/
class EventLoggingDelegate
{
public:
virtual ~EventLoggingDelegate() {}
/**
* @brief
* A function that supplies eventData element for the event logging subsystem.
*
* Functions of this type are expected to provide the eventData
* element for the event logging subsystem. The functions of this
* type are called after the event subsystem has generated all
* required event metadata. The function is called with a
* chip::TLV::TLVWriter object into which it will emit a single TLV element
* tagged kTag_EventData; the value of that element MUST be a
* structure containing the event data. The event data itself must
* be structured using context tags.
*
* @param[in,out] aWriter A reference to the chip::TLV::TLVWriter object to be
* used for event data serialization.
*
* @retval #CHIP_NO_ERROR On success.
*
* @retval other An appropriate error signaling to the
* caller that the serialization of event
* data could not be completed.
*
*/
virtual CHIP_ERROR WriteEvent(chip::TLV::TLVWriter & aWriter) = 0;
};
} // namespace app
} // namespace chip
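A compact illustrative sketch (not part of the imported file) of a hand-written delegate; the field layout is a placeholder.

#include <app/EventLoggingDelegate.h>
#include <app/MessageDef/EventDataIB.h>
#include <lib/support/CodeUtils.h>

// Hypothetical delegate: emits one structure tagged kData with a single
// context-tagged field, as the contract above requires.
class ExampleEventDelegate : public chip::app::EventLoggingDelegate
{
public:
    explicit ExampleEventDelegate(uint32_t aValue) : mValue(aValue) {}

    CHIP_ERROR WriteEvent(chip::TLV::TLVWriter & aWriter) override
    {
        chip::TLV::TLVType outer;
        ReturnErrorOnFailure(aWriter.StartContainer(chip::TLV::ContextTag(chip::app::EventDataIB::Tag::kData),
                                                    chip::TLV::kTLVType_Structure, outer));
        ReturnErrorOnFailure(aWriter.Put(chip::TLV::ContextTag(static_cast<uint8_t>(1)), mValue));
        return aWriter.EndContainer(outer);
    }

private:
    const uint32_t mValue;
};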

View File

@@ -0,0 +1,161 @@
/**
*
* Copyright (c) 2021 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <access/SubjectDescriptor.h>
#include <app/EventPathParams.h>
#include <app/util/basic-types.h>
#include <lib/core/CHIPCore.h>
#include <lib/core/Optional.h>
#include <lib/core/TLV.h>
#include <lib/support/LinkedList.h>
#include <system/SystemPacketBuffer.h>
inline constexpr size_t kNumPriorityLevel = 3;
namespace chip {
namespace app {
/**
* @brief
* The Priority of the log entry.
*
* @details
* Priority is used as a way to filter events before they are
* actually emitted into the log. After the event is in the log, we
* make no further provisions to expunge it from the log.
* The priority level serves to prioritize event storage. If an
* event of high priority is added to a full buffer, events are
* dropped in order of priority (and age) to accommodate it. As such,
* priority levels only have relative value. If a system is
* using only one priority level, events are dropped only in order
* of age, like a ring buffer.
*/
enum class PriorityLevel : uint8_t
{
First = 0,
/**
* Debug priority denotes log entries of interest to the
* developers of the system and is used primarily in the
* development phase. Debug priority logs are
* not accounted for in the bandwidth or power budgets of the
* constrained devices; as a result, they must be used only over
* a limited time span in production systems.
*/
Debug = First,
/**
* Info priority denotes log entries that provide extra insight
* and diagnostics into the running system. Info logging level may
* be used over an extended period of time in a production system,
* or may be used as the default log level in a field trial. On
* the constrained devices, the entries logged with Info level must
* be accounted for in the bandwidth and memory budget, but not in
* the power budget.
*/
Info = 1,
/**
* Critical priority denotes events whose loss would
* directly impact customer-facing features. Applications may use
* loss of Production Critical events to indicate system failure.
* On constrained devices, entries logged with Critical
* priority must be accounted for in the power and memory budget,
* as it is expected that they are always logged and offloaded
* from the device.
*/
Critical = 2,
Last = Critical,
Invalid = Last + 1,
};
static_assert(sizeof(std::underlying_type_t<PriorityLevel>) <= sizeof(unsigned),
"Logging that converts PriorityLevel to unsigned will be lossy");
/**
* @brief
* The struct that provides an application-set System or Epoch timestamp.
*/
struct Timestamp
{
enum class Type : uint8_t
{
kSystem = 0,
kEpoch
};
Timestamp() {}
Timestamp(Type aType, uint64_t aValue) : mType(aType), mValue(aValue) {}
Timestamp(System::Clock::Timestamp aValue) : mType(Type::kSystem), mValue(aValue.count()) {}
static Timestamp Epoch(System::Clock::Timestamp aValue)
{
Timestamp timestamp(Type::kEpoch, aValue.count());
return timestamp;
}
static Timestamp System(System::Clock::Timestamp aValue)
{
Timestamp timestamp(Type::kSystem, aValue.count());
return timestamp;
}
bool IsSystem() const { return mType == Type::kSystem; }
bool IsEpoch() const { return mType == Type::kEpoch; }
Type mType = Type::kSystem;
uint64_t mValue = 0;
};
/**
* The structure that provides options for the different event fields.
*/
class EventOptions
{
public:
EventOptions() : mPriority(PriorityLevel::Invalid) {}
EventOptions(Timestamp aTimestamp) : mTimestamp(aTimestamp), mPriority(PriorityLevel::Invalid) {}
ConcreteEventPath mPath;
Timestamp mTimestamp;
PriorityLevel mPriority = PriorityLevel::Invalid;
// kUndefinedFabricIndex (0) means the event is not associated with any fabric.
FabricIndex mFabricIndex = kUndefinedFabricIndex;
};
/**
* @brief
* Structure for copying event lists on output.
*/
struct EventLoadOutContext
{
EventLoadOutContext(TLV::TLVWriter & aWriter, PriorityLevel aPriority, EventNumber aStartingEventNumber) :
mWriter(aWriter), mPriority(aPriority), mStartingEventNumber(aStartingEventNumber), mCurrentEventNumber(0), mFirst(true)
{}
TLV::TLVWriter & mWriter;
PriorityLevel mPriority = PriorityLevel::Invalid;
EventNumber mStartingEventNumber = 0;
Timestamp mPreviousTime;
Timestamp mCurrentTime;
EventNumber mCurrentEventNumber = 0;
size_t mEventCount = 0;
const SingleLinkedListNode<EventPathParams> * mpInterestedEventPaths = nullptr;
bool mFirst = true;
Access::SubjectDescriptor mSubjectDescriptor;
};
} // namespace app
} // namespace chip
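A tiny illustrative sketch (not part of the imported file) showing the two Timestamp factories; the epoch value and function name are placeholders.

#include <app/EventLoggingTypes.h>
#include <system/SystemClock.h>

inline void TimestampExample()
{
    // A system (monotonic) timestamp taken now, and an epoch timestamp in milliseconds.
    chip::app::Timestamp systemTs = chip::app::Timestamp::System(chip::System::SystemClock().GetMonotonicTimestamp());
    chip::app::Timestamp epochTs  = chip::app::Timestamp::Epoch(chip::System::Clock::Milliseconds64(1700000000000u));
    (void) systemTs.IsSystem(); // true
    (void) epochTs.IsEpoch();   // true
}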

View File

@@ -0,0 +1,925 @@
/**
*
* Copyright (c) 2021 Project CHIP Authors
* Copyright (c) 2015-2017 Nest Labs, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <access/AccessControl.h>
#include <access/RequestPath.h>
#include <access/SubjectDescriptor.h>
#include <app/EventManagement.h>
#include <app/InteractionModelEngine.h>
#include <app/RequiredPrivilege.h>
#include <assert.h>
#include <inttypes.h>
#include <lib/core/TLVUtilities.h>
#include <lib/support/CodeUtils.h>
#include <lib/support/logging/CHIPLogging.h>
using namespace chip::TLV;
namespace chip {
namespace app {
static EventManagement sInstance;
/**
* @brief
* A TLVReader backed by CircularEventBuffer
*/
class CircularEventReader : public TLV::TLVReader
{
public:
/**
* @brief
* Initializes a TLVReader object backed by CircularEventBuffer
*
* Reading begins in the CircularTLVBuffer belonging to this
* CircularEventBuffer. When the reader runs out of data, it begins
* to read from the previous CircularEventBuffer.
*
* @param[in] apBuf A pointer to a fully initialized CircularEventBuffer
*
*/
void Init(CircularEventBufferWrapper * apBuf);
virtual ~CircularEventReader() = default;
};
EventManagement & EventManagement::GetInstance()
{
return sInstance;
}
struct ReclaimEventCtx
{
CircularEventBuffer * mpEventBuffer = nullptr;
size_t mSpaceNeededForMovedEvent = 0;
};
/**
* @brief
* Internal structure for traversing event list.
*/
struct CopyAndAdjustDeltaTimeContext
{
CopyAndAdjustDeltaTimeContext(TLVWriter * aWriter, EventLoadOutContext * inContext) : mpWriter(aWriter), mpContext(inContext) {}
TLV::TLVWriter * mpWriter = nullptr;
EventLoadOutContext * mpContext = nullptr;
};
void EventManagement::Init(Messaging::ExchangeManager * apExchangeManager, uint32_t aNumBuffers,
CircularEventBuffer * apCircularEventBuffer, const LogStorageResources * const apLogStorageResources,
MonotonicallyIncreasingCounter<EventNumber> * apEventNumberCounter,
System::Clock::Milliseconds64 aMonotonicStartupTime)
{
CircularEventBuffer * current = nullptr;
CircularEventBuffer * prev = nullptr;
CircularEventBuffer * next = nullptr;
if (aNumBuffers == 0)
{
ChipLogError(EventLogging, "Invalid aNumBuffers");
return;
}
if (mState != EventManagementStates::Shutdown)
{
ChipLogError(EventLogging, "Invalid EventManagement State");
return;
}
mpExchangeMgr = apExchangeManager;
for (uint32_t bufferIndex = 0; bufferIndex < aNumBuffers; bufferIndex++)
{
next = (bufferIndex < aNumBuffers - 1) ? &apCircularEventBuffer[bufferIndex + 1] : nullptr;
current = &apCircularEventBuffer[bufferIndex];
current->Init(apLogStorageResources[bufferIndex].mpBuffer, apLogStorageResources[bufferIndex].mBufferSize, prev, next,
apLogStorageResources[bufferIndex].mPriority);
prev = current;
current->mProcessEvictedElement = nullptr;
current->mAppData = nullptr;
}
mpEventNumberCounter = apEventNumberCounter;
mLastEventNumber = mpEventNumberCounter->GetValue();
mpEventBuffer = apCircularEventBuffer;
mState = EventManagementStates::Idle;
mBytesWritten = 0;
mMonotonicStartupTime = aMonotonicStartupTime;
}
CHIP_ERROR EventManagement::CopyToNextBuffer(CircularEventBuffer * apEventBuffer)
{
CircularTLVWriter writer;
CircularTLVReader reader;
CHIP_ERROR err = CHIP_NO_ERROR;
CircularEventBuffer * nextBuffer = apEventBuffer->GetNextCircularEventBuffer();
if (nextBuffer == nullptr)
{
return CHIP_ERROR_INVALID_ARGUMENT;
}
CircularEventBuffer backup = *nextBuffer;
// Set up the next buffer such that it fails if it needs to evict an element
nextBuffer->mProcessEvictedElement = AlwaysFail;
writer.Init(*nextBuffer);
// Set up the reader such that it is positioned to read the head event
reader.Init(*apEventBuffer);
err = reader.Next();
SuccessOrExit(err);
err = writer.CopyElement(reader);
SuccessOrExit(err);
err = writer.Finalize();
SuccessOrExit(err);
ChipLogDetail(EventLogging, "Copy Event to next buffer with priority %u", static_cast<unsigned>(nextBuffer->GetPriority()));
exit:
if (err != CHIP_NO_ERROR)
{
*nextBuffer = backup;
}
return err;
}
CHIP_ERROR EventManagement::EnsureSpaceInCircularBuffer(size_t aRequiredSpace, PriorityLevel aPriority)
{
CHIP_ERROR err = CHIP_NO_ERROR;
size_t requiredSpace = aRequiredSpace;
CircularEventBuffer * eventBuffer = mpEventBuffer;
ReclaimEventCtx ctx;
// Check that we have this much space in all our event buffers that might
// hold the event. If we do not, that will prevent the event from being
// properly evicted into higher-priority buffers. We want to discover
// this early, so that testing surfaces the need to make those buffers
// larger.
for (auto * currentBuffer = mpEventBuffer; currentBuffer; currentBuffer = currentBuffer->GetNextCircularEventBuffer())
{
VerifyOrExit(requiredSpace <= currentBuffer->GetTotalDataLength(), err = CHIP_ERROR_BUFFER_TOO_SMALL);
if (currentBuffer->IsFinalDestinationForPriority(aPriority))
{
break;
}
}
VerifyOrExit(eventBuffer != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
// check whether we actually need to do anything, exit if we don't
VerifyOrExit(requiredSpace > eventBuffer->AvailableDataLength(), err = CHIP_NO_ERROR);
while (true)
{
if (requiredSpace > eventBuffer->AvailableDataLength())
{
ctx.mpEventBuffer = eventBuffer;
ctx.mSpaceNeededForMovedEvent = 0;
eventBuffer->mProcessEvictedElement = EvictEvent;
eventBuffer->mAppData = &ctx;
err = eventBuffer->EvictHead();
// One of two things happened: either the element was evicted immediately (when the head's priority matches the
// current, final buffer), or we figured out how much space is needed to evict it into the next buffer. That check
// happens in the EvictEvent function.
if (err != CHIP_NO_ERROR)
{
VerifyOrExit(ctx.mSpaceNeededForMovedEvent != 0, /* no-op, return err */);
VerifyOrExit(eventBuffer->GetNextCircularEventBuffer() != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
if (ctx.mSpaceNeededForMovedEvent <= eventBuffer->GetNextCircularEventBuffer()->AvailableDataLength())
{
// we can copy the event outright. copy event and
// subsequently evict head s.t. evicting the head
// element always succeeds.
// Since we're calling CopyElement and we've checked
// that there is space in the next buffer, we don't expect
// this to fail.
err = CopyToNextBuffer(eventBuffer);
SuccessOrExit(err);
// success; evict head unconditionally
eventBuffer->mProcessEvictedElement = nullptr;
err = eventBuffer->EvictHead();
// if unconditional eviction failed, this
// means that we have no way of further
// clearing the buffer. fail out and let the
// caller know that we could not honor the
// request
SuccessOrExit(err);
continue;
}
// We cannot copy the event outright. Remember the
// currently required space in mRequiredSpaceForEvicted, note the
// space requirement of the event at the head of the current
// buffer, and go make that much space in the next buffer.
eventBuffer->SetRequiredSpaceforEvicted(requiredSpace);
eventBuffer = eventBuffer->GetNextCircularEventBuffer();
// Sanity check: return an error here on a null event buffer. If
// eventBuffer->mpNext were null, then the earlier eviction
// would have succeeded -- the event was
// already in the final buffer.
VerifyOrExit(eventBuffer != nullptr, err = CHIP_ERROR_INCORRECT_STATE);
requiredSpace = ctx.mSpaceNeededForMovedEvent;
}
}
else
{
// This branch is only taken when we walk back down the buffer chain: the next buffer now has enough free space,
// so we retry copying the event from the current buffer into it and then free space in the current buffer.
if (eventBuffer == mpEventBuffer)
break;
eventBuffer = eventBuffer->GetPreviousCircularEventBuffer();
requiredSpace = eventBuffer->GetRequiredSpaceforEvicted();
err = CHIP_NO_ERROR;
}
}
mpEventBuffer->mProcessEvictedElement = nullptr;
mpEventBuffer->mAppData = nullptr;
exit:
return err;
}
CHIP_ERROR EventManagement::CalculateEventSize(EventLoggingDelegate * apDelegate, const EventOptions * apOptions,
uint32_t & requiredSize)
{
System::PacketBufferTLVWriter writer;
EventLoadOutContext ctxt = EventLoadOutContext(writer, apOptions->mPriority, GetLastEventNumber());
System::PacketBufferHandle buf = System::PacketBufferHandle::New(kMaxEventSizeReserve);
if (buf.IsNull())
{
return CHIP_ERROR_NO_MEMORY;
}
writer.Init(std::move(buf));
ctxt.mCurrentEventNumber = mLastEventNumber;
ctxt.mCurrentTime = mLastEventTimestamp;
CHIP_ERROR err = ConstructEvent(&ctxt, apDelegate, apOptions);
if (err == CHIP_NO_ERROR)
{
requiredSize = writer.GetLengthWritten();
}
return err;
}
CHIP_ERROR EventManagement::ConstructEvent(EventLoadOutContext * apContext, EventLoggingDelegate * apDelegate,
const EventOptions * apOptions)
{
VerifyOrReturnError(apContext->mCurrentEventNumber >= apContext->mStartingEventNumber, CHIP_NO_ERROR
/* no-op: don't write event, but advance current event Number */);
VerifyOrReturnError(apOptions != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
EventReportIB::Builder eventReportBuilder;
ReturnErrorOnFailure(eventReportBuilder.Init(&(apContext->mWriter)));
EventDataIB::Builder & eventDataIBBuilder = eventReportBuilder.CreateEventData();
ReturnErrorOnFailure(eventReportBuilder.GetError());
EventPathIB::Builder & eventPathBuilder = eventDataIBBuilder.CreatePath();
ReturnErrorOnFailure(eventDataIBBuilder.GetError());
CHIP_ERROR err = eventPathBuilder.Endpoint(apOptions->mPath.mEndpointId)
.Cluster(apOptions->mPath.mClusterId)
.Event(apOptions->mPath.mEventId)
.EndOfEventPathIB();
ReturnErrorOnFailure(err);
eventDataIBBuilder.EventNumber(apContext->mCurrentEventNumber).Priority(chip::to_underlying(apContext->mPriority));
ReturnErrorOnFailure(eventDataIBBuilder.GetError());
if (apOptions->mTimestamp.IsSystem())
{
eventDataIBBuilder.SystemTimestamp(apOptions->mTimestamp.mValue);
}
else
{
eventDataIBBuilder.EpochTimestamp(apOptions->mTimestamp.mValue);
}
ReturnErrorOnFailure(eventDataIBBuilder.GetError());
// Callback to write the EventData
ReturnErrorOnFailure(apDelegate->WriteEvent(apContext->mWriter));
// The fabricIndex profile tag is internal use only for fabric filtering when retrieving event from circular event buffer,
// and would not go on the wire.
// Revisit FabricRemovedCB function should the encoding of fabricIndex change in the future.
if (apOptions->mFabricIndex != kUndefinedFabricIndex)
{
apContext->mWriter.Put(TLV::ProfileTag(kEventManagementProfile, kFabricIndexTag), apOptions->mFabricIndex);
}
ReturnErrorOnFailure(eventDataIBBuilder.EndOfEventDataIB());
ReturnErrorOnFailure(eventReportBuilder.EndOfEventReportIB());
ReturnErrorOnFailure(apContext->mWriter.Finalize());
apContext->mFirst = false;
return CHIP_NO_ERROR;
}
void EventManagement::CreateEventManagement(Messaging::ExchangeManager * apExchangeManager, uint32_t aNumBuffers,
CircularEventBuffer * apCircularEventBuffer,
const LogStorageResources * const apLogStorageResources,
MonotonicallyIncreasingCounter<EventNumber> * apEventNumberCounter,
System::Clock::Milliseconds64 aMonotonicStartupTime)
{
sInstance.Init(apExchangeManager, aNumBuffers, apCircularEventBuffer, apLogStorageResources, apEventNumberCounter,
aMonotonicStartupTime);
}
/**
* @brief Perform any actions we need to on shutdown.
*/
void EventManagement::DestroyEventManagement()
{
sInstance.mState = EventManagementStates::Shutdown;
sInstance.mpEventBuffer = nullptr;
sInstance.mpExchangeMgr = nullptr;
}
CircularEventBuffer * EventManagement::GetPriorityBuffer(PriorityLevel aPriority) const
{
CircularEventBuffer * buf = mpEventBuffer;
while (!buf->IsFinalDestinationForPriority(aPriority))
{
buf = buf->GetNextCircularEventBuffer();
assert(buf != nullptr);
// code guarantees that every PriorityLevel has a buffer destination.
}
return buf;
}
CHIP_ERROR EventManagement::CopyAndAdjustDeltaTime(const TLVReader & aReader, size_t aDepth, void * apContext)
{
CopyAndAdjustDeltaTimeContext * ctx = static_cast<CopyAndAdjustDeltaTimeContext *>(apContext);
TLVReader reader(aReader);
if (aReader.GetTag() == TLV::ProfileTag(kEventManagementProfile, kFabricIndexTag))
{
// Does not go on the wire.
return CHIP_NO_ERROR;
}
if ((aReader.GetTag() == TLV::ContextTag(EventDataIB::Tag::kSystemTimestamp)) && !(ctx->mpContext->mFirst) &&
(ctx->mpContext->mCurrentTime.mType == ctx->mpContext->mPreviousTime.mType))
{
return ctx->mpWriter->Put(TLV::ContextTag(EventDataIB::Tag::kDeltaSystemTimestamp),
ctx->mpContext->mCurrentTime.mValue - ctx->mpContext->mPreviousTime.mValue);
}
if ((aReader.GetTag() == TLV::ContextTag(EventDataIB::Tag::kEpochTimestamp)) && !(ctx->mpContext->mFirst) &&
(ctx->mpContext->mCurrentTime.mType == ctx->mpContext->mPreviousTime.mType))
{
return ctx->mpWriter->Put(TLV::ContextTag(EventDataIB::Tag::kDeltaEpochTimestamp),
ctx->mpContext->mCurrentTime.mValue - ctx->mpContext->mPreviousTime.mValue);
}
return ctx->mpWriter->CopyElement(reader);
}
void EventManagement::VendEventNumber()
{
CHIP_ERROR err = CHIP_NO_ERROR;
// Now advance the counter.
err = mpEventNumberCounter->Advance();
if (err != CHIP_NO_ERROR)
{
ChipLogError(EventLogging, "%s Advance() failed with %" CHIP_ERROR_FORMAT, __FUNCTION__, err.Format());
}
// Assign the event number from the counter's current value.
mLastEventNumber = mpEventNumberCounter->GetValue();
}
CHIP_ERROR EventManagement::LogEvent(EventLoggingDelegate * apDelegate, const EventOptions & aEventOptions,
EventNumber & aEventNumber)
{
assertChipStackLockedByCurrentThread();
VerifyOrReturnError(mState != EventManagementStates::Shutdown, CHIP_ERROR_INCORRECT_STATE);
return LogEventPrivate(apDelegate, aEventOptions, aEventNumber);
}
CHIP_ERROR EventManagement::LogEventPrivate(EventLoggingDelegate * apDelegate, const EventOptions & aEventOptions,
EventNumber & aEventNumber)
{
CircularTLVWriter writer;
CHIP_ERROR err = CHIP_NO_ERROR;
uint32_t requestSize = 0;
aEventNumber = 0;
CircularTLVWriter checkpoint = writer;
EventLoadOutContext ctxt = EventLoadOutContext(writer, aEventOptions.mPriority, mLastEventNumber);
EventOptions opts;
Timestamp timestamp;
#if CHIP_DEVICE_CONFIG_EVENT_LOGGING_UTC_TIMESTAMPS
System::Clock::Milliseconds64 utc_time;
err = System::SystemClock().GetClock_RealTimeMS(utc_time);
if (err == CHIP_NO_ERROR)
{
timestamp = Timestamp::Epoch(utc_time);
}
else
#endif // CHIP_DEVICE_CONFIG_EVENT_LOGGING_UTC_TIMESTAMPS
{
auto systemTimeMs = System::SystemClock().GetMonotonicMilliseconds64() - mMonotonicStartupTime;
timestamp = Timestamp::System(systemTimeMs);
}
opts = EventOptions(timestamp);
// Start the event container (anonymous structure) in the circular buffer
writer.Init(*mpEventBuffer);
opts.mPriority = aEventOptions.mPriority;
// Create all event specific data
// Timestamp; encoded as a delta time
opts.mPath = aEventOptions.mPath;
opts.mFabricIndex = aEventOptions.mFabricIndex;
ctxt.mCurrentEventNumber = mLastEventNumber;
ctxt.mCurrentTime.mValue = mLastEventTimestamp.mValue;
err = CalculateEventSize(apDelegate, &opts, requestSize);
SuccessOrExit(err);
// Ensure we have space in the in-memory logging queues
err = EnsureSpaceInCircularBuffer(requestSize, aEventOptions.mPriority);
SuccessOrExit(err);
err = ConstructEvent(&ctxt, apDelegate, &opts);
SuccessOrExit(err);
mBytesWritten += writer.GetLengthWritten();
exit:
if (err != CHIP_NO_ERROR)
{
ChipLogError(EventLogging, "Log event with error %" CHIP_ERROR_FORMAT, err.Format());
writer = checkpoint;
}
else if (opts.mPriority >= CHIP_CONFIG_EVENT_GLOBAL_PRIORITY)
{
aEventNumber = mLastEventNumber;
VendEventNumber();
mLastEventTimestamp = timestamp;
#if CHIP_CONFIG_EVENT_LOGGING_VERBOSE_DEBUG_LOGS
ChipLogDetail(EventLogging,
"LogEvent event number: 0x" ChipLogFormatX64 " priority: %u, endpoint id: 0x%x"
" cluster id: " ChipLogFormatMEI " event id: 0x%" PRIx32 " %s timestamp: 0x" ChipLogFormatX64,
ChipLogValueX64(aEventNumber), static_cast<unsigned>(opts.mPriority), opts.mPath.mEndpointId,
ChipLogValueMEI(opts.mPath.mClusterId), opts.mPath.mEventId,
opts.mTimestamp.mType == Timestamp::Type::kSystem ? "Sys" : "Epoch", ChipLogValueX64(opts.mTimestamp.mValue));
#endif // CHIP_CONFIG_EVENT_LOGGING_VERBOSE_DEBUG_LOGS
err = InteractionModelEngine::GetInstance()->GetReportingEngine().ScheduleEventDelivery(opts.mPath, mBytesWritten);
}
return err;
}
CHIP_ERROR EventManagement::CopyEvent(const TLVReader & aReader, TLVWriter & aWriter, EventLoadOutContext * apContext)
{
TLVReader reader;
TLVType containerType;
TLVType containerType1;
CopyAndAdjustDeltaTimeContext context(&aWriter, apContext);
CHIP_ERROR err = CHIP_NO_ERROR;
reader.Init(aReader);
ReturnErrorOnFailure(reader.EnterContainer(containerType));
ReturnErrorOnFailure(aWriter.StartContainer(AnonymousTag(), kTLVType_Structure, containerType));
ReturnErrorOnFailure(reader.Next());
ReturnErrorOnFailure(reader.EnterContainer(containerType1));
ReturnErrorOnFailure(
aWriter.StartContainer(TLV::ContextTag(EventReportIB::Tag::kEventData), kTLVType_Structure, containerType1));
err = TLV::Utilities::Iterate(reader, CopyAndAdjustDeltaTime, &context, false /*recurse*/);
if (err == CHIP_END_OF_TLV)
{
err = CHIP_NO_ERROR;
}
ReturnErrorOnFailure(err);
ReturnErrorOnFailure(aWriter.EndContainer(containerType1));
ReturnErrorOnFailure(aWriter.EndContainer(containerType));
ReturnErrorOnFailure(aWriter.Finalize());
return CHIP_NO_ERROR;
}
CHIP_ERROR EventManagement::CheckEventContext(EventLoadOutContext * eventLoadOutContext,
const EventManagement::EventEnvelopeContext & event)
{
if (eventLoadOutContext->mCurrentEventNumber < eventLoadOutContext->mStartingEventNumber)
{
return CHIP_ERROR_UNEXPECTED_EVENT;
}
if (event.mFabricIndex.HasValue() &&
(event.mFabricIndex.Value() == kUndefinedFabricIndex ||
eventLoadOutContext->mSubjectDescriptor.fabricIndex != event.mFabricIndex.Value()))
{
return CHIP_ERROR_UNEXPECTED_EVENT;
}
ConcreteEventPath path(event.mEndpointId, event.mClusterId, event.mEventId);
CHIP_ERROR ret = CHIP_ERROR_UNEXPECTED_EVENT;
for (auto * interestedPath = eventLoadOutContext->mpInterestedEventPaths; interestedPath != nullptr;
interestedPath = interestedPath->mpNext)
{
if (interestedPath->mValue.IsEventPathSupersetOf(path))
{
ret = CHIP_NO_ERROR;
break;
}
}
ReturnErrorOnFailure(ret);
Access::RequestPath requestPath{ .cluster = event.mClusterId,
.endpoint = event.mEndpointId,
.requestType = Access::RequestType::kEventReadRequest,
.entityId = event.mEventId };
Access::Privilege requestPrivilege = RequiredPrivilege::ForReadEvent(path);
CHIP_ERROR accessControlError =
Access::GetAccessControl().Check(eventLoadOutContext->mSubjectDescriptor, requestPath, requestPrivilege);
if (accessControlError != CHIP_NO_ERROR)
{
ReturnErrorCodeIf((accessControlError != CHIP_ERROR_ACCESS_DENIED) &&
(accessControlError != CHIP_ERROR_ACCESS_RESTRICTED_BY_ARL),
accessControlError);
ret = CHIP_ERROR_UNEXPECTED_EVENT;
}
return ret;
}
CHIP_ERROR EventManagement::EventIterator(const TLVReader & aReader, size_t aDepth, EventLoadOutContext * apEventLoadOutContext,
EventEnvelopeContext * event)
{
CHIP_ERROR err = CHIP_NO_ERROR;
TLVReader innerReader;
TLVType tlvType;
TLVType tlvType1;
innerReader.Init(aReader);
VerifyOrDie(event != nullptr);
ReturnErrorOnFailure(innerReader.EnterContainer(tlvType));
ReturnErrorOnFailure(innerReader.Next());
ReturnErrorOnFailure(innerReader.EnterContainer(tlvType1));
err = TLV::Utilities::Iterate(innerReader, FetchEventParameters, event, false /*recurse*/);
if (event->mFieldsToRead != kRequiredEventField)
{
return CHIP_ERROR_INVALID_ARGUMENT;
}
if (err == CHIP_END_OF_TLV)
{
err = CHIP_NO_ERROR;
}
ReturnErrorOnFailure(err);
apEventLoadOutContext->mCurrentTime = event->mCurrentTime;
apEventLoadOutContext->mCurrentEventNumber = event->mEventNumber;
err = CheckEventContext(apEventLoadOutContext, *event);
if (err == CHIP_NO_ERROR)
{
err = CHIP_EVENT_ID_FOUND;
}
else if (err == CHIP_ERROR_UNEXPECTED_EVENT)
{
err = CHIP_NO_ERROR;
}
return err;
}
CHIP_ERROR EventManagement::CopyEventsSince(const TLVReader & aReader, size_t aDepth, void * apContext)
{
EventLoadOutContext * const loadOutContext = static_cast<EventLoadOutContext *>(apContext);
EventEnvelopeContext event;
CHIP_ERROR err = EventIterator(aReader, aDepth, loadOutContext, &event);
if (err == CHIP_EVENT_ID_FOUND)
{
// checkpoint the writer
TLV::TLVWriter checkpoint = loadOutContext->mWriter;
err = CopyEvent(aReader, loadOutContext->mWriter, loadOutContext);
// CHIP_NO_ERROR and CHIP_END_OF_TLV signify a
// successful copy. In all other cases, roll the
// writer state back to the checkpoint, i.e., the state
// before we began the copy operation.
if ((err != CHIP_NO_ERROR) && (err != CHIP_END_OF_TLV))
{
loadOutContext->mWriter = checkpoint;
return err;
}
loadOutContext->mPreviousTime.mValue = loadOutContext->mCurrentTime.mValue;
loadOutContext->mFirst = false;
loadOutContext->mEventCount++;
}
return err;
}
CHIP_ERROR EventManagement::FetchEventsSince(TLVWriter & aWriter, const SingleLinkedListNode<EventPathParams> * apEventPathList,
EventNumber & aEventMin, size_t & aEventCount,
const Access::SubjectDescriptor & aSubjectDescriptor)
{
// TODO: Add particular set of event Paths in FetchEventsSince so that we can filter the interested paths
CHIP_ERROR err = CHIP_NO_ERROR;
const bool recurse = false;
TLVReader reader;
CircularEventBufferWrapper bufWrapper;
EventLoadOutContext context(aWriter, PriorityLevel::Invalid, aEventMin);
context.mSubjectDescriptor = aSubjectDescriptor;
context.mpInterestedEventPaths = apEventPathList;
err = GetEventReader(reader, PriorityLevel::Critical, &bufWrapper);
SuccessOrExit(err);
err = TLV::Utilities::Iterate(reader, CopyEventsSince, &context, recurse);
if (err == CHIP_END_OF_TLV)
{
err = CHIP_NO_ERROR;
}
exit:
if (err == CHIP_ERROR_BUFFER_TOO_SMALL || err == CHIP_ERROR_NO_MEMORY)
{
// We failed to fetch the current event because the buffer is too small; we will start from this one the next time.
aEventMin = context.mCurrentEventNumber;
}
else
{
// For all other cases, continue from the next event.
aEventMin = context.mCurrentEventNumber + 1;
}
aEventCount += context.mEventCount;
return err;
}
CHIP_ERROR EventManagement::FabricRemovedCB(const TLV::TLVReader & aReader, size_t aDepth, void * apContext)
{
// The function does not actually remove the event; instead, it sets the fabric index to an invalid value.
FabricIndex * invalidFabricIndex = static_cast<FabricIndex *>(apContext);
TLVReader event;
TLVType tlvType;
TLVType tlvType1;
event.Init(aReader);
VerifyOrReturnError(event.EnterContainer(tlvType) == CHIP_NO_ERROR, CHIP_NO_ERROR);
VerifyOrReturnError(event.Next(TLV::ContextTag(EventReportIB::Tag::kEventData)) == CHIP_NO_ERROR, CHIP_NO_ERROR);
VerifyOrReturnError(event.EnterContainer(tlvType1) == CHIP_NO_ERROR, CHIP_NO_ERROR);
while (CHIP_NO_ERROR == event.Next())
{
if (event.GetTag() == TLV::ProfileTag(kEventManagementProfile, kFabricIndexTag))
{
uint8_t fabricIndex = 0;
VerifyOrReturnError(event.Get(fabricIndex) == CHIP_NO_ERROR, CHIP_NO_ERROR);
if (fabricIndex == *invalidFabricIndex)
{
TLVCircularBuffer * readBuffer = static_cast<TLVCircularBuffer *>(event.GetBackingStore());
// fabricIndex is encoded as an integer; the read point will be immediately after its encoding.
// Shift dataPtr back to the encoding of the fabric index, accounting for wraparound in the backing storage.
// We cannot compute the actual encoded size from the beginning of the current container to the fabric index
// because of several optional parameters, so we assume minimal encoding is used and the fabric index is 1 byte.
uint8_t * dataPtr;
if (event.GetReadPoint() != readBuffer->GetQueue())
{
dataPtr = readBuffer->GetQueue() + (event.GetReadPoint() - readBuffer->GetQueue() - 1);
}
else
{
dataPtr = readBuffer->GetQueue() + readBuffer->GetTotalDataLength() - 1;
}
*dataPtr = kUndefinedFabricIndex;
}
return CHIP_NO_ERROR;
}
}
return CHIP_NO_ERROR;
}
CHIP_ERROR EventManagement::FabricRemoved(FabricIndex aFabricIndex)
{
const bool recurse = false;
TLVReader reader;
CircularEventBufferWrapper bufWrapper;
ReturnErrorOnFailure(GetEventReader(reader, PriorityLevel::Critical, &bufWrapper));
CHIP_ERROR err = TLV::Utilities::Iterate(reader, FabricRemovedCB, &aFabricIndex, recurse);
if (err == CHIP_END_OF_TLV)
{
err = CHIP_NO_ERROR;
}
return err;
}
CHIP_ERROR EventManagement::GetEventReader(TLVReader & aReader, PriorityLevel aPriority, CircularEventBufferWrapper * apBufWrapper)
{
CircularEventBuffer * buffer = GetPriorityBuffer(aPriority);
VerifyOrReturnError(buffer != nullptr, CHIP_ERROR_INVALID_ARGUMENT);
apBufWrapper->mpCurrent = buffer;
CircularEventReader reader;
reader.Init(apBufWrapper);
aReader.Init(reader);
return CHIP_NO_ERROR;
}
CHIP_ERROR EventManagement::FetchEventParameters(const TLVReader & aReader, size_t, void * apContext)
{
EventEnvelopeContext * const envelope = static_cast<EventEnvelopeContext *>(apContext);
TLVReader reader;
reader.Init(aReader);
if (reader.GetTag() == TLV::ContextTag(EventDataIB::Tag::kPath))
{
EventPathIB::Parser path;
ReturnErrorOnFailure(path.Init(aReader));
ReturnErrorOnFailure(path.GetEndpoint(&(envelope->mEndpointId)));
ReturnErrorOnFailure(path.GetCluster(&(envelope->mClusterId)));
ReturnErrorOnFailure(path.GetEvent(&(envelope->mEventId)));
envelope->mFieldsToRead |= 1 << to_underlying(EventDataIB::Tag::kPath);
}
if (reader.GetTag() == TLV::ContextTag(EventDataIB::Tag::kPriority))
{
uint16_t extPriority; // Note: the type here matches the type cast in EventManagement::LogEvent, priority section
ReturnErrorOnFailure(reader.Get(extPriority));
envelope->mPriority = static_cast<PriorityLevel>(extPriority);
envelope->mFieldsToRead |= 1 << to_underlying(EventDataIB::Tag::kPriority);
}
if (reader.GetTag() == TLV::ContextTag(EventDataIB::Tag::kEventNumber))
{
ReturnErrorOnFailure(reader.Get(envelope->mEventNumber));
}
if (reader.GetTag() == TLV::ContextTag(EventDataIB::Tag::kSystemTimestamp))
{
uint64_t systemTime;
ReturnErrorOnFailure(reader.Get(systemTime));
envelope->mCurrentTime.mType = Timestamp::Type::kSystem;
envelope->mCurrentTime.mValue = systemTime;
}
if (reader.GetTag() == TLV::ContextTag(EventDataIB::Tag::kEpochTimestamp))
{
uint64_t epochTime;
ReturnErrorOnFailure(reader.Get(epochTime));
envelope->mCurrentTime.mType = Timestamp::Type::kEpoch;
envelope->mCurrentTime.mValue = epochTime;
}
if (reader.GetTag() == TLV::ProfileTag(kEventManagementProfile, kFabricIndexTag))
{
uint8_t fabricIndex = kUndefinedFabricIndex;
ReturnErrorOnFailure(reader.Get(fabricIndex));
envelope->mFabricIndex.SetValue(fabricIndex);
}
return CHIP_NO_ERROR;
}
CHIP_ERROR EventManagement::EvictEvent(TLVCircularBuffer & apBuffer, void * apAppData, TLVReader & aReader)
{
// pull out the delta time, pull out the priority
ReturnErrorOnFailure(aReader.Next());
TLVType containerType;
TLVType containerType1;
ReturnErrorOnFailure(aReader.EnterContainer(containerType));
ReturnErrorOnFailure(aReader.Next());
ReturnErrorOnFailure(aReader.EnterContainer(containerType1));
EventEnvelopeContext context;
constexpr bool recurse = false;
CHIP_ERROR err = TLV::Utilities::Iterate(aReader, FetchEventParameters, &context, recurse);
if (err == CHIP_END_OF_TLV)
{
err = CHIP_NO_ERROR;
}
ReturnErrorOnFailure(err);
ReturnErrorOnFailure(aReader.ExitContainer(containerType1));
ReturnErrorOnFailure(aReader.ExitContainer(containerType));
const PriorityLevel imp = static_cast<PriorityLevel>(context.mPriority);
ReclaimEventCtx * const ctx = static_cast<ReclaimEventCtx *>(apAppData);
CircularEventBuffer * const eventBuffer = ctx->mpEventBuffer;
if (eventBuffer->IsFinalDestinationForPriority(imp))
{
ChipLogProgress(EventLogging,
"Dropped 1 event from buffer with priority %u and event number 0x" ChipLogFormatX64
" due to overflow: event priority_level: %u",
static_cast<unsigned>(eventBuffer->GetPriority()), ChipLogValueX64(context.mEventNumber),
static_cast<unsigned>(imp));
ctx->mSpaceNeededForMovedEvent = 0;
return CHIP_NO_ERROR;
}
// event is not getting dropped. Note how much space it requires, and return.
ctx->mSpaceNeededForMovedEvent = aReader.GetLengthRead();
return CHIP_END_OF_TLV;
}
void EventManagement::SetScheduledEventInfo(EventNumber & aEventNumber, uint32_t & aInitialWrittenEventBytes) const
{
aEventNumber = mLastEventNumber;
aInitialWrittenEventBytes = mBytesWritten;
}
CHIP_ERROR EventManagement::GenerateEvent(EventLoggingDelegate * eventPayloadWriter, const EventOptions & options,
EventNumber & generatedEventNumber)
{
return LogEvent(eventPayloadWriter, options, generatedEventNumber);
}
void CircularEventBuffer::Init(uint8_t * apBuffer, uint32_t aBufferLength, CircularEventBuffer * apPrev,
CircularEventBuffer * apNext, PriorityLevel aPriorityLevel)
{
TLVCircularBuffer::Init(apBuffer, aBufferLength);
mpPrev = apPrev;
mpNext = apNext;
mPriority = aPriorityLevel;
}
bool CircularEventBuffer::IsFinalDestinationForPriority(PriorityLevel aPriority) const
{
return !((mpNext != nullptr) && (mpNext->mPriority <= aPriority));
}
/**
* @brief
* TLVCircularBuffer::OnInit can modify the state of the buffer, but we don't want that behavior here.
* We want to make sure we don't change our state, and just report the currently-available space.
*/
CHIP_ERROR CircularEventBuffer::OnInit(TLV::TLVWriter & writer, uint8_t *& bufStart, uint32_t & bufLen)
{
GetCurrentWritableBuffer(bufStart, bufLen);
return CHIP_NO_ERROR;
}
void CircularEventReader::Init(CircularEventBufferWrapper * apBufWrapper)
{
CircularEventBuffer * prev;
if (apBufWrapper->mpCurrent == nullptr)
return;
TLVReader::Init(*apBufWrapper, apBufWrapper->mpCurrent->DataLength());
mMaxLen = apBufWrapper->mpCurrent->DataLength();
for (prev = apBufWrapper->mpCurrent->GetPreviousCircularEventBuffer(); prev != nullptr;
prev = prev->GetPreviousCircularEventBuffer())
{
CircularEventBufferWrapper bufWrapper;
bufWrapper.mpCurrent = prev;
mMaxLen += prev->DataLength();
}
}
CHIP_ERROR CircularEventBufferWrapper::GetNextBuffer(TLVReader & aReader, const uint8_t *& aBufStart, uint32_t & aBufLen)
{
CHIP_ERROR err = CHIP_NO_ERROR;
mpCurrent->GetNextBuffer(aReader, aBufStart, aBufLen);
SuccessOrExit(err);
if ((aBufLen == 0) && (mpCurrent->GetPreviousCircularEventBuffer() != nullptr))
{
mpCurrent = mpCurrent->GetPreviousCircularEventBuffer();
aBufStart = nullptr;
err = GetNextBuffer(aReader, aBufStart, aBufLen);
}
exit:
return err;
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,569 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* Copyright (c) 2015-2017 Nest Labs, Inc.
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
*
* @brief
* Management of the CHIP Event Logging.
*
*/
#pragma once
#include "EventLoggingDelegate.h"
#include <access/SubjectDescriptor.h>
#include <app/EventLoggingTypes.h>
#include <app/MessageDef/EventDataIB.h>
#include <app/MessageDef/StatusIB.h>
#include <app/data-model-provider/EventsGenerator.h>
#include <app/util/basic-types.h>
#include <lib/core/TLVCircularBuffer.h>
#include <lib/support/CHIPCounter.h>
#include <lib/support/LinkedList.h>
#include <messaging/ExchangeMgr.h>
#include <platform/CHIPDeviceConfig.h>
#include <system/SystemClock.h>
/**
* Events are stored in the LogStorageResources provided to
* EventManagement::Init.
*
* A newly generated event will be placed in the lowest-priority (in practice
* DEBUG) buffer, the one associated with the first LogStorageResource. If
* there is no space in that buffer, space will be created by evicting the
* oldest event currently in that buffer, until enough space is available.
*
* When an event is evicted from a buffer, there are two possibilities:
*
* 1) If the next LogStorageResource has a priority that is no higher than the
* event's priority, the event will be moved to that LogStorageResource's
* buffer. This may in turn require events to be evicted from that buffer.
* 2) If the next LogStorageResource has a priority that is higher than the
* event's priority, then the event is just dropped.
*
* This means that LogStorageResources at a given priority level are reserved
* for events of that priority level or higher priority.
*
* As a simple example, assume there are only two priority levels, DEBUG and
* CRITICAL, and two LogStorageResources with those priorities. In that case,
* old CRITICAL events will not start getting dropped until both buffers are
* full, while old DEBUG events will start getting dropped once the DEBUG
* LogStorageResource buffer is full.
*/
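/*
 * Illustrative sketch (not from the original sources): a possible two-level
 * LogStorageResources layout matching the DEBUG/CRITICAL example above. The
 * array and buffer names and the buffer sizes are hypothetical; only the
 * ordering (lowest priority first) follows the description above.
 *
 * @code
 * static uint8_t gDebugEventBuffer[1024];
 * static uint8_t gCritEventBuffer[1024];
 *
 * // First element: lowest priority (DEBUG); last element: highest priority (CRITICAL).
 * static const chip::app::LogStorageResources gStorageResources[] = {
 *     { &gDebugEventBuffer[0], sizeof(gDebugEventBuffer), chip::app::PriorityLevel::Debug },
 *     { &gCritEventBuffer[0], sizeof(gCritEventBuffer), chip::app::PriorityLevel::Critical },
 * };
 * @endcode
 */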
#define CHIP_CONFIG_EVENT_GLOBAL_PRIORITY PriorityLevel::Debug
namespace chip {
namespace app {
inline constexpr const uint32_t kEventManagementProfile = 0x1;
inline constexpr const uint32_t kFabricIndexTag = 0x1;
inline constexpr size_t kMaxEventSizeReserve = 512;
constexpr uint16_t kRequiredEventField =
(1 << to_underlying(EventDataIB::Tag::kPriority)) | (1 << to_underlying(EventDataIB::Tag::kPath));
/**
* @brief
* Internal event buffer, built around the TLV::TLVCircularBuffer
*/
class CircularEventBuffer : public TLV::TLVCircularBuffer
{
public:
/**
* @brief
* A constructor for the CircularEventBuffer (internal API).
*/
CircularEventBuffer() : TLVCircularBuffer(nullptr, 0){};
/**
* @brief
* An Init method for the CircularEventBuffer (internal API).
*
* @param[in] apBuffer The actual storage to use for event storage.
*
* @param[in] aBufferLength The length of the \c apBuffer in bytes.
*
* @param[in] apPrev The pointer to CircularEventBuffer storing
* events of lesser priority.
*
* @param[in] apNext The pointer to CircularEventBuffer storing
* events of greater priority.
*
* @param[in] aPriorityLevel CircularEventBuffer priority level
*/
void Init(uint8_t * apBuffer, uint32_t aBufferLength, CircularEventBuffer * apPrev, CircularEventBuffer * apNext,
PriorityLevel aPriorityLevel);
/**
* @brief
* A helper function that determines whether this buffer is the final
* destination for an event of the specified priority.
*
* @param[in] aPriority Priority of the event.
*
* @retval true if this buffer is the final destination for events of aPriority; false otherwise.
*/
bool IsFinalDestinationForPriority(PriorityLevel aPriority) const;
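/*
 * Illustrative sketch (assumption, not from the original sources): with a
 * two-buffer chain Debug -> Critical, eviction behaves as follows.
 *
 * @code
 * // debugBuf->GetNextCircularEventBuffer() == critBuf, critBuf has no next buffer.
 * debugBuf->IsFinalDestinationForPriority(PriorityLevel::Critical); // false: event is moved onward
 * debugBuf->IsFinalDestinationForPriority(PriorityLevel::Debug);    // true:  event is dropped here
 * critBuf->IsFinalDestinationForPriority(PriorityLevel::Critical);  // true:  event is dropped here
 * @endcode
 */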
PriorityLevel GetPriority() { return mPriority; }
CircularEventBuffer * GetPreviousCircularEventBuffer() { return mpPrev; }
CircularEventBuffer * GetNextCircularEventBuffer() { return mpNext; }
void SetRequiredSpaceforEvicted(size_t aRequiredSpace) { mRequiredSpaceForEvicted = aRequiredSpace; }
size_t GetRequiredSpaceforEvicted() const { return mRequiredSpaceForEvicted; }
~CircularEventBuffer() override = default;
private:
CircularEventBuffer * mpPrev = nullptr; ///< A pointer to the CircularEventBuffer storing less important events
CircularEventBuffer * mpNext = nullptr; ///< A pointer to the CircularEventBuffer storing more important events
PriorityLevel mPriority = PriorityLevel::Invalid; ///< The buffer is the final bucket for events of this priority. Events of
///< lesser priority are dropped when they get bumped out of this buffer
size_t mRequiredSpaceForEvicted = 0; ///< Required space for previous buffer to evict event to new buffer
CHIP_ERROR OnInit(TLV::TLVWriter & writer, uint8_t *& bufStart, uint32_t & bufLen) override;
};
class CircularEventReader;
/**
* @brief
* A CircularEventBufferWrapper that holds a pointer to the "current" CircularEventBuffer. When trying to locate the
* next buffer, if nothing is left in the current one, it advances its CircularEventBuffer until a buffer with data
* has been found; the TLV reader holds a pointer to this wrapper.
*/
class CircularEventBufferWrapper : public TLV::TLVCircularBuffer
{
public:
CircularEventBufferWrapper() : TLVCircularBuffer(nullptr, 0), mpCurrent(nullptr){};
CircularEventBuffer * mpCurrent;
private:
CHIP_ERROR GetNextBuffer(chip::TLV::TLVReader & aReader, const uint8_t *& aBufStart, uint32_t & aBufLen) override;
};
enum class EventManagementStates
{
Idle = 1, // No log offload in progress, log offload can begin without any constraints
InProgress = 2, // Log offload in progress
Shutdown = 3 // Not capable of performing any logging operation
};
/**
* @brief
* A helper class used in initializing logging management.
*
* The class is used to encapsulate the resources allocated by the caller and denotes
* resources to be used in logging events of a particular priority. Note that
* while resources referring to the counters are used exclusively by the
* particular priority level, the buffers are shared between `this` priority
* level and events that are "more" important.
*/
struct LogStorageResources
{
// TODO: Update TLVCircularBuffer with size_t for buffer size, then use ByteSpan
uint8_t * mpBuffer =
nullptr; // Buffer to be used as a storage at the particular priority level and shared with more important events.
// Must not be nullptr. Must be large enough to accommodate the largest event emitted by the system.
uint32_t mBufferSize = 0; ///< The size, in bytes, of the buffer pointed to by `mpBuffer`.
PriorityLevel mPriority =
PriorityLevel::Invalid; // Log priority level associated with the resources provided in this structure.
};
/**
* @brief
* A class for managing the in memory event logs. See documentation at the
* top of the file describing the eviction policy for events when there is no
* more space for new events.
*/
class EventManagement : public DataModel::EventsGenerator
{
public:
/**
* Initialize the EventManagement with an array of LogStorageResources and
* an equal-length array of CircularEventBuffers that correspond to those
* LogStorageResources. The array of LogStorageResources must provide a
* resource for each valid priority level, the elements of the array must be
* in increasing numerical value of priority (and in increasing priority);
* the first element in the array corresponds to the resources allocated for
* least important events, and the last element corresponds to the most
* critical events.
*
* @param[in] apExchangeManager ExchangeManager to be used with this logging subsystem
*
* @param[in] aNumBuffers Number of elements in the apLogStorageResources
* and apCircularEventBuffer arrays.
*
* @param[in] apCircularEventBuffer An array of CircularEventBuffer for each priority level.
*
* @param[in] apLogStorageResources An array of LogStorageResources for each priority level.
*
* @param[in] apEventNumberCounter A counter to use for event numbers.
*
* @param[in] aMonotonicStartupTime Time we should consider as "monotonic
* time 0" for cases when we use
* system-time event timestamps.
*
*/
void Init(Messaging::ExchangeManager * apExchangeManager, uint32_t aNumBuffers, CircularEventBuffer * apCircularEventBuffer,
const LogStorageResources * const apLogStorageResources,
MonotonicallyIncreasingCounter<EventNumber> * apEventNumberCounter,
System::Clock::Milliseconds64 aMonotonicStartupTime);
static EventManagement & GetInstance();
/**
* @brief Create EventManagement object and initialize the logging management
* subsystem with provided resources.
*
* Initialize the EventManagement with an array of LogStorageResources. The
* array must provide a resource for each valid priority level, the elements
* of the array must be in increasing numerical value of priority (and in
* increasing priority); the first element in the array corresponds to the
* resources allocated for the least important events, and the last element
* corresponds to the most critical events.
*
* @param[in] apExchangeManager ExchangeManager to be used with this logging subsystem
*
* @param[in] aNumBuffers Number of elements in inLogStorageResources array
*
* @param[in] apCircularEventBuffer An array of CircularEventBuffer for each priority level.
* @param[in] apLogStorageResources An array of LogStorageResources for each priority level.
*
* @param[in] apEventNumberCounter A counter to use for event numbers.
*
* @param[in] aMonotonicStartupTime Time we should consider as "monotonic
* time 0" for cases when we use
* system-time event timestamps.
*
* @note This function must be called prior to the logging being used.
*/
static void
CreateEventManagement(Messaging::ExchangeManager * apExchangeManager, uint32_t aNumBuffers,
CircularEventBuffer * apCircularEventBuffer, const LogStorageResources * const apLogStorageResources,
MonotonicallyIncreasingCounter<EventNumber> * apEventNumberCounter,
System::Clock::Milliseconds64 aMonotonicStartupTime = System::SystemClock().GetMonotonicMilliseconds64());
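/*
 * Illustrative sketch (assumption, not from the original sources): typical
 * wiring at application startup, assuming a two-entry LogStorageResources
 * array like the one sketched near the top of this file, an already
 * initialized ExchangeManager, and an event-number counter whose own
 * initialization is omitted here.
 *
 * @code
 * static chip::app::CircularEventBuffer gEventBuffers[2];
 * static chip::MonotonicallyIncreasingCounter<chip::EventNumber> gEventNumberCounter;
 *
 * chip::app::EventManagement::CreateEventManagement(&gExchangeManager, 2, &gEventBuffers[0],
 *                                                   &gStorageResources[0], &gEventNumberCounter);
 * @endcode
 */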
static void DestroyEventManagement();
/**
* @brief
* Log an event via a EventLoggingDelegate, with options.
*
* The EventLoggingDelegate writes the event metadata and calls the `apDelegate`
* with an TLV::TLVWriter reference so that the user code can emit
* the event data directly into the event log. This form of event
* logging minimizes memory consumption, as event data is serialized
* directly into the target buffer. The event data MUST contain
* context tags to be interpreted within the schema identified by
* `ClusterID` and `EventId`. The tag of the first element will be
* ignored; the event logging system will replace it with the
* eventData tag.
*
* The event is logged if the schema priority exceeds the logging
* threshold specified in the LoggingConfiguration. If the event's
* priority does not meet the current threshold, it is dropped and
* the function returns a `0` as the resulting event ID.
*
* This variant of the invocation permits the caller to set any
* combination of `EventOptions`:
* - timestamp, when 0 defaults to the current time at the point of
* the call,
* - "root" section of the event source (event source and cluster ID);
* if NULL, it defaults to the current device. The event is marked as
* relating to the device that is making the call,
*
* @param[in] apDelegate The EventLoggingDelegate to serialize the event data
*
* @param[in] aEventOptions The options for the event metadata.
*
* @param[out] aEventNumber The event Number if the event was written to the
* log, 0 otherwise.
*
* @return CHIP_ERROR CHIP Error Code
*/
CHIP_ERROR LogEvent(EventLoggingDelegate * apDelegate, const EventOptions & aEventOptions, EventNumber & aEventNumber);
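/*
 * Illustrative sketch (assumption, not from the original sources): a minimal
 * delegate that serializes a single field, and a LogEvent call. The delegate
 * class name, the context tag, and the kEndpointId/kClusterId/kEventId values
 * are hypothetical placeholders.
 *
 * @code
 * class SimpleEventDelegate : public EventLoggingDelegate
 * {
 * public:
 *     CHIP_ERROR WriteEvent(chip::TLV::TLVWriter & aWriter) override
 *     {
 *         // Event fields use context tags defined by the event's schema.
 *         return aWriter.Put(chip::TLV::ContextTag(1), static_cast<uint32_t>(42));
 *     }
 * };
 *
 * SimpleEventDelegate delegate;
 * EventOptions options;
 * options.mPriority         = PriorityLevel::Debug;
 * options.mPath.mEndpointId = kEndpointId;
 * options.mPath.mClusterId  = kClusterId;
 * options.mPath.mEventId    = kEventId;
 * EventNumber eventNumber   = 0;
 * CHIP_ERROR err = EventManagement::GetInstance().LogEvent(&delegate, options, eventNumber);
 * @endcode
 */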
/**
* @brief
* A helper method to get a TLV reader positioned over the buffers that hold data for a particular priority
*
* @param[in,out] aReader A reference to the reader that will be
* initialized with the backing storage from
* the event log
*
* @param[in] aPriority The starting priority for the reader.
* Note that in this case the starting
* priority is somewhat counterintuitive:
* more important events share the buffers
* with lower-priority events, in addition to
* their dedicated buffers. As a result, the
* reader will traverse the least data when
* the Debug priority is passed in.
*
* @param[in] apBufWrapper CircularEventBufferWrapper
* @return #CHIP_NO_ERROR Unconditionally.
*/
CHIP_ERROR GetEventReader(chip::TLV::TLVReader & aReader, PriorityLevel aPriority,
app::CircularEventBufferWrapper * apBufWrapper);
/**
* @brief
* A function to retrieve events of specified priority since a specified event ID.
*
* Given a TLV::TLVWriter, a priority type, and an event ID, the
* function will fetch events since the
* specified event number. The function will continue fetching events until
* it runs out of space in the TLV::TLVWriter or in the log. The function
* will terminate the event writing on an event boundary. The function filters out events based upon the interested paths
* specified by the read/subscribe request.
*
* @param[in] aWriter The writer to use for event storage
* @param[in] apEventPathList the interested EventPathParams list
*
* @param[in,out] aEventMin On input, the event number to start fetching from. On
* completion, the event number of the next event we plan to fetch.
*
* @param[out] aEventCount The number of fetched events
* @param[in] aSubjectDescriptor Subject descriptor for current read handler
* @retval #CHIP_END_OF_TLV The function has reached the end of the
* available log entries at the specified
* priority level
*
* @retval #CHIP_ERROR_NO_MEMORY The function ran out of space in the
* aWriter, more events in the log are
* available.
*
* @retval #CHIP_ERROR_BUFFER_TOO_SMALL The function ran out of space in the
* aWriter, more events in the log are
* available.
*
*/
CHIP_ERROR FetchEventsSince(chip::TLV::TLVWriter & aWriter, const SingleLinkedListNode<EventPathParams> * apEventPathList,
EventNumber & aEventMin, size_t & aEventCount,
const Access::SubjectDescriptor & aSubjectDescriptor);
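/*
 * Illustrative sketch (assumption, not from the original sources): fetching
 * all currently stored events matching a path list into a TLVWriter. The
 * writer is assumed to already be initialized over an output buffer, and
 * interestedPaths / subjectDescriptor are placeholders supplied by the caller.
 *
 * @code
 * EventNumber eventMin = 1; // first event number we still need
 * size_t eventCount    = 0;
 * CHIP_ERROR err = EventManagement::GetInstance().FetchEventsSince(writer, interestedPaths,
 *                                                                  eventMin, eventCount,
 *                                                                  subjectDescriptor);
 * if (err == CHIP_NO_ERROR || err == CHIP_END_OF_TLV)
 * {
 *     // All stored matching events were copied; eventMin now points just past them.
 * }
 * @endcode
 */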
/**
* @brief Iterate all events and invalidate the fabric-sensitive events whose associated fabric has the given fabric
* index.
*/
CHIP_ERROR FabricRemoved(FabricIndex aFabricIndex);
/**
* @brief
* Fetch the most recently vended event number
*
* @return EventNumber the most recently vended event number
*/
EventNumber GetLastEventNumber() const { return mLastEventNumber; }
/**
* @brief
* IsValid returns whether the EventManagement instance is valid
*/
bool IsValid(void) { return EventManagementStates::Shutdown != mState; };
/**
* Save the last logged event number and the number of event bytes written so far into the provided output parameters, for use when scheduling event delivery.
*/
void SetScheduledEventInfo(EventNumber & aEventNumber, uint32_t & aInitialWrittenEventBytes) const;
/* EventsGenerator implementation */
CHIP_ERROR GenerateEvent(EventLoggingDelegate * eventPayloadWriter, const EventOptions & options,
EventNumber & generatedEventNumber) override;
private:
/**
* @brief
* Internal structure for traversing events.
*/
struct EventEnvelopeContext
{
EventEnvelopeContext() {}
int mFieldsToRead = 0;
/* PriorityLevel and DeltaTime are present if this is not the first event being put into the report */
#if CHIP_DEVICE_CONFIG_EVENT_LOGGING_UTC_TIMESTAMPS
Timestamp mCurrentTime = Timestamp::Epoch(System::Clock::kZero);
#else // CHIP_DEVICE_CONFIG_EVENT_LOGGING_UTC_TIMESTAMPS
Timestamp mCurrentTime = Timestamp::System(System::Clock::kZero);
#endif // CHIP_DEVICE_CONFIG_EVENT_LOGGING_UTC_TIMESTAMPS
PriorityLevel mPriority = PriorityLevel::First;
ClusterId mClusterId = 0;
EndpointId mEndpointId = 0;
EventId mEventId = 0;
EventNumber mEventNumber = 0;
Optional<FabricIndex> mFabricIndex;
};
void VendEventNumber();
CHIP_ERROR CalculateEventSize(EventLoggingDelegate * apDelegate, const EventOptions * apOptions, uint32_t & requiredSize);
/**
* @brief Helper function for writing event header and data according to event
* logging protocol.
*
* @param[in,out] apContext EventLoadOutContext, initialized with stateful
* information for the buffer. State is updated
* and preserved by ConstructEvent using this context.
*
* @param[in] apDelegate The EventLoggingDelegate to serialize the event data
*
* @param[in] apOptions EventOptions describing timestamp and other tags
* relevant to this event.
*
*/
CHIP_ERROR ConstructEvent(EventLoadOutContext * apContext, EventLoggingDelegate * apDelegate, const EventOptions * apOptions);
// Internal function to log event
CHIP_ERROR LogEventPrivate(EventLoggingDelegate * apDelegate, const EventOptions & aEventOptions, EventNumber & aEventNumber);
/**
* @brief copy the event outright to next buffer with higher priority
*
* @param[in] apEventBuffer CircularEventBuffer
*
*/
CHIP_ERROR CopyToNextBuffer(CircularEventBuffer * apEventBuffer);
/**
* @brief Ensure that:
*
* 1) There could be aRequiredSpace bytes available (if enough things were
* evicted) in all buffers that can hold events with priority aPriority.
*
* 2) There are in fact aRequiredSpace bytes available in our
* lowest-priority buffer. This might involve evicting some events to
* higher-priority buffers or dropping them.
*
* @param[in] aRequiredSpace required space
* @param[in] aPriority priority of the event we are making space for.
*
*/
CHIP_ERROR EnsureSpaceInCircularBuffer(size_t aRequiredSpace, PriorityLevel aPriority);
/**
* @brief Iterate the event elements inside event tlv and mark the fabric index as kUndefinedFabricIndex if
* it matches the FabricIndex apFabricIndex points to.
*
* @param[in] aReader event tlv reader
* @param[in] apFabricIndex A FabricIndex* pointing to the fabric index for which we want to effectively evict events.
*
*/
static CHIP_ERROR FabricRemovedCB(const TLV::TLVReader & aReader, size_t, void * apFabricIndex);
/**
* @brief
* Internal API used to implement #FetchEventsSince
*
* Iterator function used to copy an event from the log into a
* TLVWriter. The included apContext contains the context of the copy
* operation, including the TLVWriter that will hold the copy of an
* event. If the event cannot be written as a whole, the TLVWriter will
* be rolled back to the event boundary.
*
* @retval #CHIP_END_OF_TLV Function reached the end of the event
* @retval #CHIP_ERROR_NO_MEMORY Function could not write a portion of
* the event to the TLVWriter.
* @retval #CHIP_ERROR_BUFFER_TOO_SMALL Function could not write a
* portion of the event to the TLVWriter.
*/
static CHIP_ERROR CopyEventsSince(const TLV::TLVReader & aReader, size_t aDepth, void * apContext);
/**
* @brief Internal iterator function used to scan and filter through event logs
*
* The function is used to scan through the event log to find events matching the spec in the supplied context.
* In particular, it checks against mStartingEventNumber and skips events that have already been fetched.
*/
static CHIP_ERROR EventIterator(const TLV::TLVReader & aReader, size_t aDepth, EventLoadOutContext * apEventLoadOutContext,
EventEnvelopeContext * event);
/**
* @brief Internal iterator function used to fetch an event into an EventEnvelopeContext; EventIterator then filters the
* event based upon that EventEnvelopeContext
*
*/
static CHIP_ERROR FetchEventParameters(const TLV::TLVReader & aReader, size_t aDepth, void * apContext);
/**
* @brief Internal iterator function used to scan and filter through event logs
* The first event gets a timestamp, subsequent ones get a delta T
* The first event in the sequence gets an event number neatly packaged
*/
static CHIP_ERROR CopyAndAdjustDeltaTime(const TLV::TLVReader & aReader, size_t aDepth, void * apContext);
/**
* @brief Check whether the tail's event can be moved to a higher-priority buffer. If not, it is dropped; if so, note
* how much space it requires, and return.
*/
static CHIP_ERROR EvictEvent(chip::TLV::TLVCircularBuffer & aBuffer, void * apAppData, TLV::TLVReader & aReader);
static CHIP_ERROR AlwaysFail(chip::TLV::TLVCircularBuffer & aBuffer, void * apAppData, TLV::TLVReader & aReader)
{
return CHIP_ERROR_NO_MEMORY;
};
/**
* @brief Check whether the event instance represented by the EventEnvelopeContext should be included in the report.
*
* @retval CHIP_ERROR_UNEXPECTED_EVENT This path should be excluded in the generated event report.
* @retval CHIP_EVENT_ID_FOUND This path should be included in the generated event report.
* @retval CHIP_ERROR_ACCESS_DENIED This path should be included in the generated event report, but the client does not have
* enough privilege to access it.
*
* TODO: Consider using CHIP_NO_ERROR, CHIP_ERROR_SKIP_EVENT, CHIP_ERROR_ACCESS_DENIED or some enum to represent the checking
* result.
*/
static CHIP_ERROR CheckEventContext(EventLoadOutContext * eventLoadOutContext, const EventEnvelopeContext & event);
/**
* @brief copy event from circular buffer to target buffer for report
*/
static CHIP_ERROR CopyEvent(const TLV::TLVReader & aReader, TLV::TLVWriter & aWriter, EventLoadOutContext * apContext);
/**
* @brief
* A function to get the circular buffer for particular priority
*
* @param aPriority PriorityLevel
*
* @return A pointer for the CircularEventBuffer
*/
CircularEventBuffer * GetPriorityBuffer(PriorityLevel aPriority) const;
// EventBuffer for debug level,
CircularEventBuffer * mpEventBuffer = nullptr;
Messaging::ExchangeManager * mpExchangeMgr = nullptr;
EventManagementStates mState = EventManagementStates::Shutdown;
uint32_t mBytesWritten = 0;
// The counter we're going to use for event numbers.
MonotonicallyIncreasingCounter<EventNumber> * mpEventNumberCounter = nullptr;
EventNumber mLastEventNumber = 0; ///< Last event Number vended
Timestamp mLastEventTimestamp; ///< The timestamp of the last event in this buffer
System::Clock::Milliseconds64 mMonotonicStartupTime;
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,65 @@
/*
*
* Copyright (c) 2021 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app/ConcreteEventPath.h>
#include <app/util/basic-types.h>
#include <lib/core/CHIPCore.h>
namespace chip {
namespace app {
struct EventPathParams
{
EventPathParams(EndpointId aEndpointId, ClusterId aClusterId, EventId aEventId, bool aUrgentEvent = false) :
mClusterId(aClusterId), mEventId(aEventId), mEndpointId(aEndpointId), mIsUrgentEvent(aUrgentEvent)
{}
EventPathParams() {}
bool IsSamePath(const EventPathParams & other) const
{
return other.mEndpointId == mEndpointId && other.mClusterId == mClusterId && other.mEventId == mEventId;
}
bool IsWildcardPath() const { return HasWildcardEndpointId() || HasWildcardClusterId() || HasWildcardEventId(); }
// For event, an event id can only be interpreted if the cluster id is known.
bool IsValidEventPath() const { return !(HasWildcardClusterId() && !HasWildcardEventId()); }
inline bool HasWildcardEndpointId() const { return mEndpointId == kInvalidEndpointId; }
inline bool HasWildcardClusterId() const { return mClusterId == kInvalidClusterId; }
inline bool HasWildcardEventId() const { return mEventId == kInvalidEventId; }
inline void SetWildcardEndpointId() { mEndpointId = kInvalidEndpointId; }
inline void SetWildcardClusterId() { mClusterId = kInvalidClusterId; }
inline void SetWildcardEventId() { mEventId = kInvalidEventId; }
bool IsEventPathSupersetOf(const ConcreteEventPath & other) const
{
VerifyOrReturnError(HasWildcardEndpointId() || mEndpointId == other.mEndpointId, false);
VerifyOrReturnError(HasWildcardClusterId() || mClusterId == other.mClusterId, false);
VerifyOrReturnError(HasWildcardEventId() || mEventId == other.mEventId, false);
return true;
}
ClusterId mClusterId = kInvalidClusterId; // uint32
EventId mEventId = kInvalidEventId; // uint32
EndpointId mEndpointId = kInvalidEndpointId; // uint16
bool mIsUrgentEvent = false; // uint8
};
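/*
 * Illustrative sketch (assumption, not from the original sources): wildcard
 * matching against a concrete event path. The numeric ids are arbitrary.
 *
 * @code
 * EventPathParams wildcardPath;                 // default: endpoint, cluster and event wildcarded
 * wildcardPath.mEndpointId = 1;                 // restrict to endpoint 1, any cluster/event
 * ConcreteEventPath concrete(1, 0x1234, 0x01);
 * bool matches = wildcardPath.IsEventPathSupersetOf(concrete); // true
 *
 * EventPathParams exactPath(1, 0x1234, 0x01);   // (endpoint, cluster, event)
 * bool valid = exactPath.IsValidEventPath();    // true: the cluster id is known
 * @endcode
 */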
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,156 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* Provides the implementation of the FailSafeContext object.
*/
#include "FailSafeContext.h"
#include <app/icd/server/ICDServerConfig.h>
#if CHIP_CONFIG_ENABLE_ICD_SERVER
#include <app/icd/server/ICDNotifier.h> // nogncheck
#endif
#include <lib/support/SafeInt.h>
#include <platform/CHIPDeviceConfig.h>
#include <platform/ConnectivityManager.h>
#include <platform/internal/CHIPDeviceLayerInternal.h>
using namespace chip::DeviceLayer;
namespace chip {
namespace app {
void FailSafeContext::HandleArmFailSafeTimer(System::Layer * layer, void * aAppState)
{
FailSafeContext * failSafeContext = reinterpret_cast<FailSafeContext *>(aAppState);
failSafeContext->FailSafeTimerExpired();
}
void FailSafeContext::HandleMaxCumulativeFailSafeTimer(System::Layer * layer, void * aAppState)
{
FailSafeContext * failSafeContext = reinterpret_cast<FailSafeContext *>(aAppState);
failSafeContext->FailSafeTimerExpired();
}
void FailSafeContext::HandleDisarmFailSafe(intptr_t arg)
{
FailSafeContext * failSafeContext = reinterpret_cast<FailSafeContext *>(arg);
failSafeContext->DisarmFailSafe();
}
void FailSafeContext::SetFailSafeArmed(bool armed)
{
#if CHIP_CONFIG_ENABLE_ICD_SERVER
if (IsFailSafeArmed() != armed)
{
ICDNotifier::GetInstance().BroadcastActiveRequest(ICDListener::KeepActiveFlag::kFailSafeArmed, armed);
}
#endif
mFailSafeArmed = armed;
}
void FailSafeContext::FailSafeTimerExpired()
{
if (!IsFailSafeArmed())
{
// In case this was a pending timer event in event loop, and we had
// done CommissioningComplete or manual disarm.
return;
}
ChipLogProgress(FailSafe, "Fail-safe timer expired");
ScheduleFailSafeCleanup(mFabricIndex, mAddNocCommandHasBeenInvoked, mUpdateNocCommandHasBeenInvoked);
}
void FailSafeContext::ScheduleFailSafeCleanup(FabricIndex fabricIndex, bool addNocCommandInvoked, bool updateNocCommandInvoked)
{
// Not armed, but busy so cannot rearm (via General Commissioning cluster) until the flushing
// via `HandleDisarmFailSafe` path is complete.
// TODO: This is hacky and we need to remove all this event pushing business, to keep all fail-safe logic-only.
mFailSafeBusy = true;
SetFailSafeArmed(false);
ChipDeviceEvent event{ .Type = DeviceEventType::kFailSafeTimerExpired,
.FailSafeTimerExpired = { .fabricIndex = fabricIndex,
.addNocCommandHasBeenInvoked = addNocCommandInvoked,
.updateNocCommandHasBeenInvoked = updateNocCommandInvoked } };
CHIP_ERROR status = PlatformMgr().PostEvent(&event);
if (status != CHIP_NO_ERROR)
{
ChipLogError(FailSafe, "Failed to post fail-safe timer expired: %" CHIP_ERROR_FORMAT, status.Format());
}
PlatformMgr().ScheduleWork(HandleDisarmFailSafe, reinterpret_cast<intptr_t>(this));
}
CHIP_ERROR FailSafeContext::ArmFailSafe(FabricIndex accessingFabricIndex, System::Clock::Seconds16 expiryLengthSeconds)
{
VerifyOrReturnError(!IsFailSafeBusy(), CHIP_ERROR_INCORRECT_STATE);
CHIP_ERROR err = CHIP_NO_ERROR;
bool cancelTimersIfError = false;
if (!IsFailSafeArmed())
{
System::Clock::Timeout maxCumulativeTimeout = System::Clock::Seconds32(CHIP_DEVICE_CONFIG_MAX_CUMULATIVE_FAILSAFE_SEC);
SuccessOrExit(err = DeviceLayer::SystemLayer().StartTimer(maxCumulativeTimeout, HandleMaxCumulativeFailSafeTimer, this));
cancelTimersIfError = true;
}
SuccessOrExit(
err = DeviceLayer::SystemLayer().StartTimer(System::Clock::Seconds16(expiryLengthSeconds), HandleArmFailSafeTimer, this));
SetFailSafeArmed(true);
mFabricIndex = accessingFabricIndex;
exit:
if (err != CHIP_NO_ERROR && cancelTimersIfError)
{
DeviceLayer::SystemLayer().CancelTimer(HandleArmFailSafeTimer, this);
DeviceLayer::SystemLayer().CancelTimer(HandleMaxCumulativeFailSafeTimer, this);
}
return err;
}
void FailSafeContext::DisarmFailSafe()
{
DeviceLayer::SystemLayer().CancelTimer(HandleArmFailSafeTimer, this);
DeviceLayer::SystemLayer().CancelTimer(HandleMaxCumulativeFailSafeTimer, this);
ResetState();
ChipLogProgress(FailSafe, "Fail-safe cleanly disarmed");
}
void FailSafeContext::ForceFailSafeTimerExpiry()
{
if (!IsFailSafeArmed())
{
return;
}
// Cancel the timer since we force its action
DeviceLayer::SystemLayer().CancelTimer(HandleArmFailSafeTimer, this);
DeviceLayer::SystemLayer().CancelTimer(HandleMaxCumulativeFailSafeTimer, this);
FailSafeTimerExpired();
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,155 @@
/*
*
* Copyright (c) 2022 Project CHIP Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file
* A 'Fail Safe Context' SHALL be created on the receiver, to track fail-safe
* state information while the fail-safe is armed.
*/
#pragma once
#include <lib/core/CHIPError.h>
#include <lib/core/DataModelTypes.h>
#include <platform/internal/CHIPDeviceLayerInternal.h>
#include <system/SystemClock.h>
namespace chip {
namespace app {
class FailSafeContext
{
public:
// ===== Members for internal use by other Device Layer components.
/**
* @brief
* Only a single fail-safe timer is started on the device. If this function is called again
* while the fail-safe timer is currently armed, the currently-running fail-safe timer will
* first be cancelled, then the fail-safe timer will be re-armed.
*/
CHIP_ERROR ArmFailSafe(FabricIndex accessingFabricIndex, System::Clock::Seconds16 expiryLengthSeconds);
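/*
 * Illustrative sketch (assumption, not from the original sources): arming the
 * fail-safe on behalf of a fabric and cleanly disarming it later. The fabric
 * index and the 60 second expiry are placeholders.
 *
 * @code
 * FailSafeContext failSafe;
 * FabricIndex accessingFabric = 1;
 * CHIP_ERROR err = failSafe.ArmFailSafe(accessingFabric, System::Clock::Seconds16(60));
 * if (err == CHIP_NO_ERROR && failSafe.IsFailSafeArmed(accessingFabric))
 * {
 *     // ... perform operations that require an armed fail-safe ...
 *     failSafe.DisarmFailSafe(); // e.g. on CommissioningComplete
 * }
 * @endcode
 */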
/**
* @brief Cleanly disarm failsafe timer, such as on CommissioningComplete
*/
void DisarmFailSafe();
void SetAddNocCommandInvoked(FabricIndex nocFabricIndex)
{
mAddNocCommandHasBeenInvoked = true;
mFabricIndex = nocFabricIndex;
}
void SetUpdateNocCommandInvoked() { mUpdateNocCommandHasBeenInvoked = true; }
void SetAddTrustedRootCertInvoked() { mAddTrustedRootCertHasBeenInvoked = true; }
void SetCsrRequestForUpdateNoc(bool isForUpdateNoc) { mIsCsrRequestForUpdateNoc = isForUpdateNoc; }
/**
* @brief
* Schedules work to clean up the FailSafe Context asynchronously after various cleanup work
* has completed.
*/
void ScheduleFailSafeCleanup(FabricIndex fabricIndex, bool addNocCommandInvoked, bool updateNocCommandInvoked);
bool IsFailSafeArmed(FabricIndex accessingFabricIndex) const
{
return IsFailSafeArmed() && MatchesFabricIndex(accessingFabricIndex);
}
// Returns true if the fail-safe is in a state where commands that require an armed
// fail-safe can no longer execute, but a new fail-safe can't be armed yet.
bool IsFailSafeBusy() const { return mFailSafeBusy; }
bool IsFailSafeArmed() const { return mFailSafeArmed; }
// True if it is possible to do an initial arming of the failsafe if needed.
// To be used in places where some action should take place only if the
// fail-safe could be armed after that action.
bool IsFailSafeFullyDisarmed() const { return !IsFailSafeArmed() && !IsFailSafeBusy(); }
bool MatchesFabricIndex(FabricIndex accessingFabricIndex) const
{
VerifyOrDie(IsFailSafeArmed());
return (accessingFabricIndex == mFabricIndex);
}
bool NocCommandHasBeenInvoked() const { return mAddNocCommandHasBeenInvoked || mUpdateNocCommandHasBeenInvoked; }
bool AddNocCommandHasBeenInvoked() const { return mAddNocCommandHasBeenInvoked; }
bool UpdateNocCommandHasBeenInvoked() const { return mUpdateNocCommandHasBeenInvoked; }
bool AddTrustedRootCertHasBeenInvoked() const { return mAddTrustedRootCertHasBeenInvoked; }
bool IsCsrRequestForUpdateNoc() const { return mIsCsrRequestForUpdateNoc; }
FabricIndex GetFabricIndex() const
{
VerifyOrDie(IsFailSafeArmed());
return mFabricIndex;
}
// Immediately disarms the timer and schedules a failsafe timer expiry.
// If the failsafe is not armed, this is a no-op.
void ForceFailSafeTimerExpiry();
private:
bool mFailSafeArmed = false;
bool mFailSafeBusy = false;
bool mAddNocCommandHasBeenInvoked = false;
bool mUpdateNocCommandHasBeenInvoked = false;
bool mAddTrustedRootCertHasBeenInvoked = false;
// The fact of whether a CSR occurred at all is stored elsewhere.
bool mIsCsrRequestForUpdateNoc = false;
FabricIndex mFabricIndex = kUndefinedFabricIndex;
/**
* @brief
* The callback function to be called when "fail-safe timer" expires.
*/
static void HandleArmFailSafeTimer(System::Layer * layer, void * aAppState);
/**
* @brief
* The callback function to be called when max cumulative time expires.
*/
static void HandleMaxCumulativeFailSafeTimer(System::Layer * layer, void * aAppState);
/**
* @brief
* The callback function to be called asynchronously after various cleanup work has completed
* to actually disarm the fail-safe.
*/
static void HandleDisarmFailSafe(intptr_t arg);
void SetFailSafeArmed(bool armed);
/**
* @brief Reset to unarmed basic state
*/
void ResetState()
{
SetFailSafeArmed(false);
mAddNocCommandHasBeenInvoked = false;
mUpdateNocCommandHasBeenInvoked = false;
mAddTrustedRootCertHasBeenInvoked = false;
mFailSafeBusy = false;
mIsCsrRequestForUpdateNoc = false;
}
void FailSafeTimerExpired();
CHIP_ERROR CommitToStorage();
};
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,54 @@
/*
* Copyright (c) 2022-2023 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <app-common/zap-generated/ids/Attributes.h>
#include <app/AppConfig.h>
#include <lib/support/CodeUtils.h>
namespace chip {
namespace app {
/**
* List of attribute ids of attributes that appear on every cluster and have
* values that are always produced via code, hence do not appear in attribute
* metadata to save space. These _must_ appear in order.
*/
constexpr AttributeId GlobalAttributesNotInMetadata[] = {
Clusters::Globals::Attributes::GeneratedCommandList::Id,
Clusters::Globals::Attributes::AcceptedCommandList::Id,
Clusters::Globals::Attributes::AttributeList::Id,
};
static_assert(ArrayIsSorted(GlobalAttributesNotInMetadata), "Array of global attribute ids must be sorted");
inline bool IsSupportedGlobalAttributeNotInMetadata(AttributeId attributeId)
{
for (auto & attr : GlobalAttributesNotInMetadata)
{
if (attr == attributeId)
{
return true;
}
}
return false;
}
} // namespace app
} // namespace chip

View File

@@ -0,0 +1,42 @@
/*
*
* Copyright (c) 2024 Project CHIP Authors
* All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "InteractionModelDelegatePointers.h"
#if CHIP_CONFIG_STATIC_GLOBAL_INTERACTION_MODEL_ENGINE
// TODO: It would be nice to not need to couple the pointers class
// to the global interaction model engine
#include "InteractionModelEngine.h"
namespace chip {
template <>
app::TimedHandlerDelegate * GlobalInstanceProvider<app::TimedHandlerDelegate>::InstancePointer()
{
return app::InteractionModelEngine::GetInstance();
}
template <>
app::WriteHandlerDelegate * GlobalInstanceProvider<app::WriteHandlerDelegate>::InstancePointer()
{
return app::InteractionModelEngine::GetInstance();
}
} // namespace chip
#endif

Some files were not shown because too many files have changed in this diff