8000 TC-TMP-2.1: Allow tolerance to be optional by cecille · Pull Request #38771 · project-chip/connectedhomeip · GitHub
[go: up one dir, main page]
More Web Proxy on the site http://driver.im/
Skip to content

TC-TMP-2.1: Allow tolerance to be optional #38771

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
May 12, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
28 changes: 23 additions & 5 deletions src/python_testing/TC_TMP_2_1.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,6 +13,23 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# === BEGIN CI TEST ARGUMENTS ===
# test-runner-runs:
# run1:
# app: ${ALL_CLUSTERS_APP}
# app-args: --discriminator 1234 --KVS kvs1 --trace-to json:${TRACE_APP}.json
# script-args: >
# --storage-path admin_storage.json
# --commissioning-method on-network
# --discriminator 1234
# --passcode 20202021
# --endpoint 1
# --trace-to json:${TRACE_TEST_JSON}.json
# --trace-to perfetto:${TRACE_TEST_PERFETTO}.perfetto
# factory-reset: true
# quiet: true
# === END CI TEST ARGUMENTS ===

import chip.clusters as Clusters
from chip.clusters.Types import NullValue
Expand All @@ -39,7 +56,7 @@ def steps_TC_TMP_2_1(self) -> list[TestStep]:
"Verify that -27315 ≤ `min_measured_value` < `max_bound`"),
TestStep(6, "TH reads the MeasuredValue attribute from the DUT",
"Verify that the DUT response contains either null or a int16 where `min_bound` ≤ MeasuredValue ≤ `max_bound`."),
TestStep(7, "TH reads the Tolerance attribute from the DUT",
TestStep(7, "If the tolerance attribute is supported, TH reads the Tolerance attribute from the DUT",
"Verify that Tolerance is in the range of 0 to 2048"),
]

Expand Down Expand Up @@ -87,10 +104,11 @@ async def test_TC_TMP_2_1(self):
measured_value, max_bound, "Measured value is greater than max bound")

self.step(7)
tolerance = await self.read_single_attribute_check_success(cluster=cluster, attribute=attr.Tolerance)
asserts.assert_greater_equal(tolerance, 0, "Tolerance is less than 0")
asserts.assert_less_equal(
tolerance, 2048, "Tolerance is greater than 2048")
if await self.attribute_guard(self.get_endpoint(), attr.Tolerance):
tolerance = await self.read_single_attribute_check_success(cluster=cluster, attribute=attr.Tolerance)
asserts.assert_greater_equal(tolerance, 0, "Tolerance is less than 0")
asserts.assert_less_equal(
tolerance, 2048, "Tolerance is greater than 2048")


if __name__ == "__main__":
Expand Down
4 changes: 0 additions & 4 deletions src/python_testing/test_metadata.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -58,10 +58,6 @@ not_automated:
reason:
Currently isn't enabled because we don't have any examples with
conformant PICS
- name: TC_TMP_2_1.py
reason:
src/python_testing/test_testing/test_TC_TMP_2_1.py is the Unit test of
this test
- name: TC_OCC_3_1.py
reason:
There are CI issues for the test cases that implements manually
Expand Down
29 changes: 26 additions & 3 deletions src/python_testing/test_testing/test_TC_TMP_2_1.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,7 +135,9 @@ class TestSpec():
TestSpec(0, 0, 0, 10, False),
# min specified, max out of range top
TestSpec(0, 32768, 0, 10, False),
]

TEST_CASES_TOLERANCE = [
# ==============================
# Tolerance test cases
# ==============================
Expand All @@ -147,7 +149,6 @@ class TestSpec():
TestSpec(NullValue, NullValue, 0, -1, False),
# Tolerance out of range top
TestSpec(NullValue, NullValue, 0, 2049, False),

]


Expand All @@ -157,6 +158,19 @@ def test_spec_to_attribute_cache(test_spec: TestSpec) -> Attribute.AsyncReadTran
resp = Attribute.AsyncReadTransaction.ReadResponse({}, [], {})
resp.attributes = {1: {c: {attr.MaxMeasuredValue: test_spec.max,
attr.MinMeasuredValue: test_spec.min, attr.MeasuredValue: test_spec.measured, attr.Tolerance: test_spec.tolerance}}}
resp.attributes[1][c][attr.AttributeList] = [a.attribute_id for a in resp.attributes[1][c].keys()]

return resp


def test_spec_to_attribute_cache_no_tolerance(test_spec: TestSpec) -> Attribute.AsyncReadTransaction.ReadResponse:
    """Build a mock read response simulating a DUT WITHOUT the optional Tolerance attribute.

    Unlike test_spec_to_attribute_cache, the Tolerance attribute is deliberately
    omitted from the cached attributes, and hence from the derived AttributeList,
    so the test under test takes its "attribute not supported" path when it
    checks the attribute guard for Tolerance.
    """
    c = Clusters.TemperatureMeasurement
    attr = Clusters.TemperatureMeasurement.Attributes
    resp = Attribute.AsyncReadTransaction.ReadResponse({}, [], {})
    # NOTE: attr.Tolerance is intentionally excluded here — including it (as the
    # with-tolerance builder does) would make this function identical to
    # test_spec_to_attribute_cache and the "tolerance not included" cases in
    # main() would not actually exercise the optional-attribute handling.
    resp.attributes = {1: {c: {attr.MaxMeasuredValue: test_spec.max,
                               attr.MinMeasuredValue: test_spec.min,
                               attr.MeasuredValue: test_spec.measured}}}
    # AttributeList is derived from the cached attributes, so it too omits Tolerance.
    resp.attributes[1][c][attr.AttributeList] = [a.attribute_id for a in resp.attributes[1][c].keys()]

    return resp


Expand All @@ -167,11 +181,20 @@ def main():
for idx, t in enumerate(TEST_CASES):
ok = test_runner.run_test_with_mock_read(test_spec_to_attribute_cache(t)) == t.expect_pass
if not ok:
failures.append(f"Measured test case failure: {idx} {t}")
failures.append(f"Measured test case failure (tolerance included): {idx} {t}")
ok = test_runner.run_test_with_mock_read(test_spec_to_attribute_cache_no_tolerance(t)) == t.expect_pass
if not ok:
failures.append(f"Measured test case failure (tolerance not included): {idx} {t}")

for idx, t in enumerate(TEST_CASES_TOLERANCE):
ok = test_runner.run_test_with_mock_read(test_spec_to_attribute_cache(t)) == t.expect_pass
if not ok:
failures.append(f"Measured tolerance test case failure: {idx} {t}")

test_runner.Shutdown()
num_tests = len(TEST_CASES)*2 + len(TEST_CASES_TOLERANCE)
print(
f"Test of tests: run {len(TEST_CASES)}, test response correct: {len(TEST_CASES) - len(failures)} test response incorrect: {len(failures)}")
f"Test of tests: run {num_tests}, test response correct: {num_tests - len(failures)} test response incorrect: {len(failures)}")
for f in failures:
print(f)

Expand Down
Loading
0