Coverage for functions \ flipdare \ generated \ model \ backend \ metric \ count_metric.py: 100%

0 statements  

« prev     ^ index     » next       coverage.py v7.13.0, created at 2026-05-08 12:22 +1000

1#!/usr/bin/env python 

2# 

3# Copyright (c) 2026 Flipdare Pty Ltd. All rights reserved. 

4# 

5# This file is part of Flipdare's proprietary software and contains 

6# confidential and copyrighted material. Unauthorised copying, 

7# modification, distribution, or use of this file is strictly 

8# prohibited without prior written permission from Flipdare Pty Ltd. 

9# 

10# This software includes third-party components licensed under MIT, 

11# BSD, and Apache 2.0 licences. See THIRD_PARTY_NOTICES for details. 

12# 

13# NOTE: THIS FILE IS AUTO GENERATED. DO NOT EDIT. 

14# 

15# Generated by codegen_models.py 

16# 

17# Modify 'codegen_models.py' 

18# and re-run the script above to update. 

19# 

20# pragma: no cover 

21from __future__ import annotations 

22from typing import Any, TypedDict, cast, Unpack 

23from enum import StrEnum 

24from pydantic import ConfigDict, TypeAdapter 

25from flipdare.firestore.core.app_base_model import AppBaseModel 

26 

27 

28class CountMetricKeys(StrEnum): 

29 SUCCESS_CT = "success_ct" 

30 FAILED_CT = "failed_ct" 

31 SKIPPED_CT = "skipped_ct" 

32 DURATION = "duration" 

33 

34 

class CountMetric(AppBaseModel):
    """Pass/fail metric with counts of passed, failed, and skipped tests, as well as duration."""

    model_config = ConfigDict(populate_by_name=True)

    # All counters are plain ints; `duration`'s time unit is not specified
    # here — presumably whatever unit the surrounding pipeline uses.
    success_ct: int
    failed_ct: int
    skipped_ct: int
    duration: int

    @classmethod
    def validate_partial(cls, **data: Unpack[CountMetricDict]) -> dict[str, Any]:
        """
        Validate a partial update against this model's field types.

        Unpack gives callers autocomplete and static warnings when an
        invalid key or value type is passed. Keys that are not model
        fields are silently dropped.

        Returns a dict keyed by Firestore field names (aliases when
        defined) for use with batch.update().
        """
        fields = cls.__pydantic_fields__
        validated: dict[str, Any] = {}
        for name, raw in data.items():
            info = fields.get(name)
            if info is None:
                continue  # not a declared model field — skip silently
            value = cast(
                "Any",
                TypeAdapter(info.annotation).validate_python(raw),
            )
            # Prefer the Firestore alias when one is configured.
            validated[info.alias or name] = value
        return validated

    # ---- Convenience factories -----------------------------------------

    @classmethod
    def empty(cls) -> CountMetric:
        """All-zero metric.

        An all-zero entry should stand out in outlier analysis when
        sibling runs processed a significant number of entries.
        """
        return cls(success_ct=0, failed_ct=0, skipped_ct=0, duration=0)

    @classmethod
    def error(cls, duration: int) -> CountMetric:
        """Sentinel metric for an errored run.

        Uses an implausibly high failure count (10000) so the run is
        guaranteed to surface in regression analysis.
        """
        return cls(success_ct=0, failed_ct=10000, skipped_ct=0, duration=duration)

    # ---- Convenience predicates -----------------------------------------

    def merge(self, other: CountMetric) -> CountMetric:
        """Return a new metric whose counters and duration are the field-wise sums."""
        summed = {
            "success_ct": self.success_ct + other.success_ct,
            "failed_ct": self.failed_ct + other.failed_ct,
            "skipped_ct": self.skipped_ct + other.skipped_ct,
            "duration": self.duration + other.duration,
        }
        return CountMetric(**summed)

99 

100 

# Model field names in declaration order, handy for building Firestore queries.
COUNTMETRIC_FIELD_NAMES: list[str] = [*CountMetric.model_fields]

102 

103 

# Keyword shape accepted by CountMetric.validate_partial(); every key is
# optional (total=False), mirroring the model's field names and types.
CountMetricDict = TypedDict(
    "CountMetricDict",
    {
        "success_ct": int,
        "failed_ct": int,
        "skipped_ct": int,
        "duration": int,
    },
    total=False,
)