From 0198734ebf46cbe829ca0093d3c0181fa59611db Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 17 Jun 2024 14:38:36 -0600 Subject: [PATCH 01/35] Refactor __init__ --- .../sdk/metrics/_internal/aggregation.py | 67 ++-- ...xponential_bucket_histogram_aggregation.py | 291 +++++++++++++----- 2 files changed, 242 insertions(+), 116 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 3a09cdcfea1..3d48b6c702b 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -544,6 +544,7 @@ class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]): def __init__( self, attributes: Attributes, + instrument_aggregation_temporality: AggregationTemporality, start_time_unix_nano: int, # This is the default maximum number of buckets per positive or # negative number range. The value 160 is specified by OpenTelemetry. @@ -552,9 +553,16 @@ def __init__( max_size: int = 160, max_scale: int = 20, ): - super().__init__(attributes) # max_size is the maximum capacity of the positive and negative # buckets. + # _sum is the sum of all the values aggregated by this aggregator. + # _count is the count of all calls to aggregate. + # _zero_count is the count of all the calls to aggregate when the value + # to be aggregated is exactly 0. + # _min is the smallest value aggregated by this aggregator. + # _max is the largest value aggregated by this aggregator. + # _positive holds the positive values. + # _negative holds the negative values by their absolute value. 
if max_size < self._min_max_size: raise ValueError( f"Buckets max size {max_size} is smaller than " @@ -566,53 +574,44 @@ def __init__( f"Buckets max size {max_size} is larger than " "maximum max size {self._max_max_size}" ) + if max_scale > 20: + _logger.warning( + "max_scale is set to %s which is " + "larger than the recommended value of 20", + self._max_scale, + ) + + super().__init__(attributes) self._max_size = max_size self._max_scale = max_scale - - # _sum is the sum of all the values aggregated by this aggregator. + self._min = inf + self._max = -inf self._sum = 0 - - # _count is the count of all calls to aggregate. self._count = 0 - - # _zero_count is the count of all the calls to aggregate when the value - # to be aggregated is exactly 0. self._zero_count = 0 - # _min is the smallest value aggregated by this aggregator. - self._min = inf - - # _max is the smallest value aggregated by this aggregator. - self._max = -inf - - # _positive holds the positive values. - self._positive = Buckets() + self._start_time_unix_nano = start_time_unix_nano + self._instrument_aggregation_temporality = ( + instrument_aggregation_temporality + ) - # _negative holds the negative values by their absolute value. - self._negative = Buckets() + self._current_value_positive = None + self._current_value_negative = None - # _mapping corresponds to the current scale, is shared by both the - # positive and negative buckets. 
+ self._previous_collection_start_nano = self._start_time_unix_nano + self._previous_cumulative_value_positive = Buckets() + self._previous_cumulative_value_negative = Buckets() + self._previous_min = inf + self._previous_max = -inf + self._previous_sum = 0 + self._previous_count = 0 + self._previous_zero_count = 0 - if self._max_scale > 20: - _logger.warning( - "max_scale is set to %s which is " - "larger than the recommended value of 20", - self._max_scale, - ) self._mapping = _new_exponential_mapping(self._max_scale) - self._instrument_aggregation_temporality = AggregationTemporality.DELTA - self._start_time_unix_nano = start_time_unix_nano - self._previous_scale = None self._previous_start_time_unix_nano = None - self._previous_zero_count = None - self._previous_count = None - self._previous_sum = None - self._previous_max = None - self._previous_min = None self._previous_positive = None self._previous_negative = None diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index e243e09643d..cabd7b24509 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -73,8 +73,8 @@ def swap( ): for attribute in [ - "_positive", - "_negative", + "_current_value_positive", + "_current_value_negative", "_sum", "_count", "_zero_count", @@ -137,14 +137,24 @@ def require_equal(self, a, b): self.assertEqual(a._mapping.scale, b._mapping.scale) - self.assertEqual(len(a._positive), len(b._positive)) - self.assertEqual(len(a._negative), len(b._negative)) + self.assertEqual( + len(a._current_value_positive), len(b._current_value_positive) + ) + self.assertEqual( + len(a._current_value_negative), len(b._current_value_negative) + ) - for index in 
range(len(a._positive)): - self.assertEqual(a._positive[index], b._positive[index]) + for index in range(len(a._current_value_positive)): + self.assertEqual( + a._current_value_positive[index], + b._current_value_positive[index] + ) - for index in range(len(a._negative)): - self.assertEqual(a._negative[index], b._negative[index]) + for index in range(len(a._current_value_negative)): + self.assertEqual( + a._current_value_negative[index], + b._current_value_negative[index] + ) def test_alternating_growth_0(self): """ @@ -161,7 +171,9 @@ def test_alternating_growth_0(self): # agg is an instance of github.com/lightstep/otel-launcher-go/lightstep/sdk/metric/aggregator/histogram/structure.Histogram[float64] exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=4) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + ) ) exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) @@ -169,11 +181,15 @@ def test_alternating_growth_0(self): exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) self.assertEqual( - exponential_histogram_aggregation._positive.offset, -1 + exponential_histogram_aggregation._current_value_positive.offset, + -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - get_counts(exponential_histogram_aggregation._positive), [1, 1, 1] + get_counts( + exponential_histogram_aggregation._current_value_positive + ), + [1, 1, 1] ) def test_alternating_growth_1(self): @@ -185,7 +201,9 @@ def test_alternating_growth_1(self): """ exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=4) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + ) ) exponential_histogram_aggregation.aggregate(Measurement(2, Mock())) @@ -196,11 +214,15 @@ def test_alternating_growth_1(self): 
exponential_histogram_aggregation.aggregate(Measurement(0.5, Mock())) self.assertEqual( - exponential_histogram_aggregation._positive.offset, -1 + exponential_histogram_aggregation._current_value_positive.offset, + -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, -1) self.assertEqual( - get_counts(exponential_histogram_aggregation._positive), [2, 3, 1] + get_counts( + exponential_histogram_aggregation._current_value_positive + ), + [2, 3, 1] ) def test_permutations(self): @@ -246,7 +268,10 @@ def test_permutations(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=2 + Mock(), + AggregationTemporality.DELTA, + Mock(), + max_size=2 ) ) @@ -261,19 +286,26 @@ def test_permutations(self): expected["scale"], ) self.assertEqual( - exponential_histogram_aggregation._positive.offset, + exponential_histogram_aggregation. + _current_value_positive. + offset, expected["offset"], ) self.assertEqual( - len(exponential_histogram_aggregation._positive), + len( + exponential_histogram_aggregation. + _current_value_positive + ), expected["len"], ) self.assertEqual( - exponential_histogram_aggregation._positive[0], + exponential_histogram_aggregation. + _current_value_positive[0], expected["at_0"], ) self.assertEqual( - exponential_histogram_aggregation._positive[1], + exponential_histogram_aggregation. + _current_value_positive[1], expected["at_1"], ) @@ -292,7 +324,10 @@ def ascending_sequence_test( exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=max_size + Mock(), + AggregationTemporality.DELTA, + Mock(), + max_size=max_size ) ) @@ -317,7 +352,10 @@ def ascending_sequence_test( init_scale, exponential_histogram_aggregation._mapping._scale ) self.assertEqual( - offset, exponential_histogram_aggregation._positive.offset + offset, + exponential_histogram_aggregation. + _current_value_positive. 
+ offset ) exponential_histogram_aggregation.aggregate( @@ -326,7 +364,7 @@ def ascending_sequence_test( sum_ += max_val self.assertNotEqual( - 0, exponential_histogram_aggregation._positive[0] + 0, exponential_histogram_aggregation._current_value_positive[0] ) # The maximum-index filled bucket is at or @@ -337,12 +375,16 @@ def ascending_sequence_test( total_count = 0 for index in range( - len(exponential_histogram_aggregation._positive) + len(exponential_histogram_aggregation._current_value_positive) ): - total_count += exponential_histogram_aggregation._positive[ - index - ] - if exponential_histogram_aggregation._positive[index] != 0: + total_count += ( + exponential_histogram_aggregation. + _current_value_positive[index] + ) + if ( + exponential_histogram_aggregation. + _current_value_positive[index] != 0 + ): max_fill = index # FIXME the corresponding Go code is @@ -369,15 +411,21 @@ def ascending_sequence_test( index = mapping.map_to_index(min_val) self.assertEqual( - index, exponential_histogram_aggregation._positive.offset + index, + exponential_histogram_aggregation. + _current_value_positive. + offset ) index = mapping.map_to_index(max_val) self.assertEqual( index, - exponential_histogram_aggregation._positive.offset - + len(exponential_histogram_aggregation._positive) + exponential_histogram_aggregation. + _current_value_positive.offset + + len( + exponential_histogram_aggregation._current_value_positive + ) - 1, ) @@ -394,7 +442,10 @@ def mock_increment(self, bucket_index: int) -> None: exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=256 + Mock(), + AggregationTemporality.DELTA, + Mock(), + max_size=256 ) ) @@ -408,12 +459,14 @@ def mock_increment(self, bucket_index: int) -> None: for value in range(2, 257): expect += value * increment with patch.object( - exponential_histogram_aggregation._positive, + exponential_histogram_aggregation. 
+ _current_value_positive, "increment_bucket", # new=positive_mock MethodType( mock_increment, - exponential_histogram_aggregation._positive, + exponential_histogram_aggregation. + _current_value_positive, ), ): exponential_histogram_aggregation.aggregate( @@ -434,16 +487,22 @@ def mock_increment(self, bucket_index: int) -> None: self.assertEqual( 256 - ((1 << scale) - 1), - len(exponential_histogram_aggregation._positive), + len( + exponential_histogram_aggregation. + _current_value_positive + ), ) self.assertEqual( (1 << scale) - 1, - exponential_histogram_aggregation._positive.offset, + exponential_histogram_aggregation. + _current_value_positive. + offset, ) for index in range(0, 256): self.assertLessEqual( - exponential_histogram_aggregation._positive[index], + exponential_histogram_aggregation. + _current_value_positive[index], 6 * increment, ) @@ -451,12 +510,12 @@ def test_move_into(self): exponential_histogram_aggregation_0 = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=256 + Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 ) ) exponential_histogram_aggregation_1 = ( _ExponentialBucketHistogramAggregation( - Mock(), Mock(), max_size=256 + Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 ) ) @@ -489,36 +548,46 @@ def test_move_into(self): self.assertEqual( 256 - ((1 << scale) - 1), - len(exponential_histogram_aggregation_1._positive), + len(exponential_histogram_aggregation_1._current_value_positive), ) self.assertEqual( (1 << scale) - 1, - exponential_histogram_aggregation_1._positive.offset, + exponential_histogram_aggregation_1._current_value_positive.offset, ) for index in range(0, 256): self.assertLessEqual( - exponential_histogram_aggregation_1._positive[index], 6 + exponential_histogram_aggregation_1. 
+ _current_value_positive[index], + 6 ) def test_very_large_numbers(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=2) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=2 + ) ) def expect_balanced(count: int): self.assertEqual( - 2, len(exponential_histogram_aggregation._positive) + 2, + len(exponential_histogram_aggregation._current_value_positive) ) self.assertEqual( - -1, exponential_histogram_aggregation._positive.offset + -1, + exponential_histogram_aggregation. + _current_value_positive. + offset ) self.assertEqual( - count, exponential_histogram_aggregation._positive[0] + count, + exponential_histogram_aggregation._current_value_positive[0] ) self.assertEqual( - count, exponential_histogram_aggregation._positive[1] + count, + exponential_histogram_aggregation._current_value_positive[1] ) exponential_histogram_aggregation.aggregate( @@ -580,7 +649,9 @@ def expect_balanced(count: int): def test_full_range(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=2) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=2 + ) ) exponential_histogram_aggregation.aggregate( @@ -602,18 +673,25 @@ def test_full_range(self): self.assertEqual( _ExponentialBucketHistogramAggregation._min_max_size, - len(exponential_histogram_aggregation._positive), + len(exponential_histogram_aggregation._current_value_positive), ) self.assertEqual( - -1, exponential_histogram_aggregation._positive.offset + -1, + exponential_histogram_aggregation._current_value_positive.offset + ) + self.assertLessEqual( + exponential_histogram_aggregation._current_value_positive[0], 2 + ) + self.assertLessEqual( + exponential_histogram_aggregation._current_value_positive[1], 1 ) - self.assertLessEqual(exponential_histogram_aggregation._positive[0], 2) - 
self.assertLessEqual(exponential_histogram_aggregation._positive[1], 1) def test_aggregator_min_max(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) for value in [1, 3, 5, 7, 9]: @@ -625,7 +703,9 @@ def test_aggregator_min_max(self): self.assertEqual(9, exponential_histogram_aggregation._max) exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) for value in [-1, -3, -5, -7, -9]: @@ -639,21 +719,27 @@ def test_aggregator_min_max(self): def test_aggregator_copy_swap(self): exponential_histogram_aggregation_0 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) for value in [1, 3, 5, 7, 9, -1, -3, -5]: exponential_histogram_aggregation_0.aggregate( Measurement(value, Mock()) ) exponential_histogram_aggregation_1 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) for value in [5, 4, 3, 2]: exponential_histogram_aggregation_1.aggregate( Measurement(value, Mock()) ) exponential_histogram_aggregation_2 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) swap( @@ -662,8 +748,8 @@ def test_aggregator_copy_swap(self): ) # pylint: disable=unnecessary-dunder-call - exponential_histogram_aggregation_2._positive.__init__() - exponential_histogram_aggregation_2._negative.__init__() + exponential_histogram_aggregation_2._current_value_positive.__init__() + exponential_histogram_aggregation_2._current_value_negative.__init__() exponential_histogram_aggregation_2._sum = 0 
exponential_histogram_aggregation_2._count = 0 exponential_histogram_aggregation_2._zero_count = 0 @@ -674,8 +760,8 @@ def test_aggregator_copy_swap(self): ) for attribute in [ - "_positive", - "_negative", + "_current_value_positive", + "_current_value_negative", "_sum", "_count", "_zero_count", @@ -697,7 +783,9 @@ def test_aggregator_copy_swap(self): def test_zero_count_by_increment(self): exponential_histogram_aggregation_0 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) increment = 10 @@ -707,10 +795,14 @@ def test_zero_count_by_increment(self): Measurement(0, Mock()) ) exponential_histogram_aggregation_1 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) - # positive_mock = Mock(wraps=exponential_histogram_aggregation_1._positive) + # positive_mock = Mock( + # wraps=exponential_histogram_aggregation_1._current_value_positive + # ) def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket @@ -719,11 +811,12 @@ def mock_increment(self, bucket_index: int) -> None: self._counts[bucket_index] += increment with patch.object( - exponential_histogram_aggregation_1._positive, + exponential_histogram_aggregation_1._current_value_positive, "increment_bucket", # new=positive_mock MethodType( - mock_increment, exponential_histogram_aggregation_1._positive + mock_increment, + exponential_histogram_aggregation_1._current_value_positive ), ): exponential_histogram_aggregation_1.aggregate( @@ -740,7 +833,9 @@ def mock_increment(self, bucket_index: int) -> None: def test_one_count_by_increment(self): exponential_histogram_aggregation_0 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) increment = 10 @@ -750,10 +845,14 @@ def 
test_one_count_by_increment(self): Measurement(1, Mock()) ) exponential_histogram_aggregation_1 = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock()) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock() + ) ) - # positive_mock = Mock(wraps=exponential_histogram_aggregation_1._positive) + # positive_mock = Mock( + # wraps=exponential_histogram_aggregation_1._current_value_positive + # ) def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket @@ -762,11 +861,12 @@ def mock_increment(self, bucket_index: int) -> None: self._counts[bucket_index] += increment with patch.object( - exponential_histogram_aggregation_1._positive, + exponential_histogram_aggregation_1._current_value_positive, "increment_bucket", # new=positive_mock MethodType( - mock_increment, exponential_histogram_aggregation_1._positive + mock_increment, + exponential_histogram_aggregation_1._current_value_positive ), ): exponential_histogram_aggregation_1.aggregate( @@ -820,6 +920,7 @@ def test_min_max_size(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + AggregationTemporality.DELTA, Mock(), max_size=_ExponentialBucketHistogramAggregation._min_max_size, ) @@ -833,7 +934,11 @@ def test_min_max_size(self): # This means the smallest max_scale is enough for the full range of the # normal floating point values. self.assertEqual( - len(exponential_histogram_aggregation._positive._counts), + len( + exponential_histogram_aggregation. + _current_value_positive. 
+ _counts + ), exponential_histogram_aggregation._min_max_size, ) @@ -844,6 +949,7 @@ def test_aggregate_collect(self): exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + AggregationTemporality.DELTA, Mock(), ) ) @@ -865,6 +971,7 @@ def test_collect_results_cumulative(self) -> None: exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( Mock(), + AggregationTemporality.DELTA, Mock(), ) ) @@ -949,7 +1056,11 @@ def test_collect_results_cumulative(self) -> None: self.assertEqual(collection_1.max, 8) def test_cumulative_aggregation_with_random_data(self) -> None: - histogram = _ExponentialBucketHistogramAggregation(Mock(), Mock()) + histogram = _ExponentialBucketHistogramAggregation( + Mock(), + AggregationTemporality.DELTA, + Mock(), + ) def collect_and_validate() -> None: result: ExponentialHistogramDataPoint = histogram.collect( @@ -994,7 +1105,9 @@ def collect_and_validate() -> None: def test_merge_collect_cumulative(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=4) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + ) ) for value in [2, 4, 8, 16]: @@ -1003,9 +1116,13 @@ def test_merge_collect_cumulative(self): ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) - self.assertEqual(exponential_histogram_aggregation._positive.offset, 0) self.assertEqual( - exponential_histogram_aggregation._positive.counts, [1, 1, 1, 1] + exponential_histogram_aggregation._current_value_positive.offset, + 0 + ) + self.assertEqual( + exponential_histogram_aggregation._current_value_positive.counts, + [1, 1, 1, 1] ) result = exponential_histogram_aggregation.collect( @@ -1020,10 +1137,12 @@ def test_merge_collect_cumulative(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._positive.offset, -4 + 
exponential_histogram_aggregation._current_value_positive.offset, + -4 ) self.assertEqual( - exponential_histogram_aggregation._positive.counts, [1, 1, 1, 1] + exponential_histogram_aggregation._current_value_positive.counts, + [1, 1, 1, 1] ) result_1 = exponential_histogram_aggregation.collect( @@ -1036,7 +1155,9 @@ def test_merge_collect_cumulative(self): def test_merge_collect_delta(self): exponential_histogram_aggregation = ( - _ExponentialBucketHistogramAggregation(Mock(), Mock(), max_size=4) + _ExponentialBucketHistogramAggregation( + Mock(), AggregationTemporality.DELTA, Mock(), max_size=4 + ) ) for value in [2, 4, 8, 16]: @@ -1045,9 +1166,13 @@ def test_merge_collect_delta(self): ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) - self.assertEqual(exponential_histogram_aggregation._positive.offset, 0) self.assertEqual( - exponential_histogram_aggregation._positive.counts, [1, 1, 1, 1] + exponential_histogram_aggregation._current_value_positive.offset, + 0 + ) + self.assertEqual( + exponential_histogram_aggregation._current_value_positive.counts, + [1, 1, 1, 1] ) result = exponential_histogram_aggregation.collect( @@ -1062,10 +1187,12 @@ def test_merge_collect_delta(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._positive.offset, -4 + exponential_histogram_aggregation._current_value_positive.offset, + -4 ) self.assertEqual( - exponential_histogram_aggregation._positive.counts, [1, 1, 1, 1] + exponential_histogram_aggregation._current_value_positive.counts, + [1, 1, 1, 1] ) result_1 = exponential_histogram_aggregation.collect( From 931933a2024a5c74e1b414cc4fc8e0358fc110ff Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 17 Jun 2024 15:42:51 -0600 Subject: [PATCH 02/35] Refactor aggregate --- .../sdk/metrics/_internal/aggregation.py | 91 ++++++++----------- 1 file changed, 37 insertions(+), 54 deletions(-) diff --git 
a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 3d48b6c702b..541b279f172 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -622,70 +622,57 @@ def aggregate(self, measurement: Measurement) -> None: value = measurement.value - # 0. Set the following attributes: - # _min - # _max - # _count - # _zero_count - # _sum + if value > 0: + if self._current_value_positive is None: + self._current_value_positive = Buckets() + current_value = self._current_value_positive + + else: + if self._current_value_negative is None: + self._current_value_negative = Buckets() + value = -value + current_value = self._current_value_negative + + self._sum += value + if value < self._min: self._min = value - if value > self._max: + elif value > self._max: self._max = value self._count += 1 if value == 0: self._zero_count += 1 - # No need to do anything else if value is zero, just increment the - # zero count. return - self._sum += value - - # 1. Use the positive buckets for positive values and the negative - # buckets for negative values. - if value > 0: - buckets = self._positive - - else: - # Both exponential and logarithm mappings use only positive values - # so the absolute value is used here. - value = -value - buckets = self._negative - - # 2. Compute the index for the value at the current scale. index = self._mapping.map_to_index(value) - # IncrementIndexBy starts here - - # 3. Determine if a change of scale is needed. 
is_rescaling_needed = False low, high = 0, 0 - if len(buckets) == 0: - buckets.index_start = index - buckets.index_end = index - buckets.index_base = index + if len(current_value) == 0: + current_value.index_start = index + current_value.index_end = index + current_value.index_base = index elif ( - index < buckets.index_start - and (buckets.index_end - index) >= self._max_size + index < current_value.index_start + and (current_value.index_end - index) >= self._max_size ): is_rescaling_needed = True low = index - high = buckets.index_end + high = current_value.index_end elif ( - index > buckets.index_end - and (index - buckets.index_start) >= self._max_size + index > current_value.index_end + and (index - current_value.index_start) >= self._max_size ): is_rescaling_needed = True - low = buckets.index_start + low = current_value.index_start high = index - # 4. Rescale the mapping if needed. if is_rescaling_needed: scale_change = self._get_scale_change(low, high) @@ -699,33 +686,29 @@ def aggregate(self, measurement: Measurement) -> None: index = self._mapping.map_to_index(value) - # 5. If the index is outside - # [buckets.index_start, buckets.index_end] readjust the buckets - # boundaries or add more buckets. - if index < buckets.index_start: - span = buckets.index_end - index + if index < current_value.index_start: + span = current_value.index_end - index - if span >= len(buckets.counts): - buckets.grow(span + 1, self._max_size) + if span >= len(current_value.counts): + current_value.grow(span + 1, self._max_size) - buckets.index_start = index + current_value.index_start = index - elif index > buckets.index_end: - span = index - buckets.index_start + elif index > current_value.index_end: + span = index - current_value.index_start - if span >= len(buckets.counts): - buckets.grow(span + 1, self._max_size) + if span >= len(current_value.counts): + current_value.grow(span + 1, self._max_size) - buckets.index_end = index + current_value.index_end = index - # 6. 
Compute the index of the bucket to be incremented. - bucket_index = index - buckets.index_base + bucket_index = index - current_value.index_base if bucket_index < 0: - bucket_index += len(buckets.counts) + bucket_index += len(current_value.counts) # 7. Increment the bucket. - buckets.increment_bucket(bucket_index) + current_value.increment_bucket(bucket_index) def collect( self, From 11e12bf3be2dd13e23dd93f7ff372e1b07fc5f42 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 17 Jun 2024 16:16:29 -0600 Subject: [PATCH 03/35] Handle case for DELTA DELTA --- .../sdk/metrics/_internal/aggregation.py | 70 +++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 541b279f172..d6d5e1458a1 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -719,6 +719,76 @@ def collect( Atomically return a point for the current value of the metric. """ # pylint: disable=too-many-statements, too-many-locals + with self._lock: + current_value_positive = self._current_value_positive + current_value_negative = self._current_value_negative + sum_ = self._sum + min_ = self._min + max_ = self._max + count = self._count + zero_count = self._zero_count + if self._count == self._zero_count: + scale = 0 + else: + scale = self._mapping.scale + + self._current_value_positive = None + self._current_value_negative = None + self._sum = 0 + self._min = inf + self._max = -inf + self._count = 0 + self._zero_count = 0 + + if ( + self._instrument_aggregation_temporality + is AggregationTemporality.DELTA + ): + # This happens when the corresponding instrument for this + # aggregation is synchronous. 
+ if ( + collection_aggregation_temporality + is AggregationTemporality.DELTA + ): + + if ( + current_value_positive is None and + current_value_negative is None + ): + return None + + previous_collection_start_nano = ( + self._previous_collection_start_nano + ) + self._previous_collection_start_nano = ( + collection_start_nano + ) + + return ExponentialHistogramDataPoint( + attributes=self._attributes, + start_time_unix_nano=previous_collection_start_nano, + time_unix_nano=collection_start_nano, + count=count, + sum=sum_, + scale=scale, + zero_count=zero_count, + positive=BucketsPoint( + offset=current_value_positive.offset, + bucket_counts=( + current_value_positive.get_offset_counts() + ), + ), + negative=BucketsPoint( + offset=current_value_negative.offset, + bucket_counts=( + current_value_negative.get_offset_counts() + ), + ), + # FIXME: Find the right value for flags + flags=0, + min=min_, + max=max_, + ) with self._lock: if self._count == 0: From 475a8e4b8cea9e45472cc5a5ed771a3fdb463e8e Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 18 Jun 2024 16:26:51 -0600 Subject: [PATCH 04/35] Checkpoint before removing old code --- .../sdk/metrics/_internal/aggregation.py | 142 +++++++++++++++++- 1 file changed, 138 insertions(+), 4 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index d6d5e1458a1..9ba62068f8e 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -611,9 +611,6 @@ def __init__( self._mapping = _new_exponential_mapping(self._max_scale) self._previous_scale = None - self._previous_start_time_unix_nano = None - self._previous_positive = None - self._previous_negative = None def aggregate(self, measurement: Measurement) -> None: # pylint: disable=too-many-branches,too-many-statements, too-many-locals @@ -707,7 
+704,6 @@ def aggregate(self, measurement: Measurement) -> None: if bucket_index < 0: bucket_index += len(current_value.counts) - # 7. Increment the bucket. current_value.increment_bucket(bucket_index) def collect( @@ -790,6 +786,141 @@ def collect( max=max_, ) + # Here collection_temporality is CUMULATIVE. + # instrument_temporality is always DELTA for the time being. + # Here we need to handle the case where: + # collect is called after at least one other call to collect + # (there is data in previous buckets, a call to merge is needed + # to handle possible differences in bucket sizes). + # collect is called without another call previous call to + # collect was made (there is no previous buckets, previous, + # empty buckets that are the same scale of the current buckets + # need to be made so that they can be cumulatively aggregated + # to the current buckets). + + # TODO, implement this case + + if current_value_positive is None: + current_value_positive = Buckets() + if current_value_negative is None: + current_value_negative = Buckets() + if self._previous_scale is None: + self._previous_scale = scale + + min_scale = min(self._previous_scale, scale) + + low_positive, high_positive = ( + self._get_low_high_previous_current( + self._previous_positive, + current_value_positive, + scale, + min_scale, + ) + ) + low_negative, high_negative = ( + self._get_low_high_previous_current( + self._previous_negative, + current_value_negative, + scale, + min_scale, + ) + ) + + min_scale = min( + min_scale + - self._get_scale_change(low_positive, high_positive), + min_scale + - self._get_scale_change(low_negative, high_negative), + ) + + # FIXME Go implementation checks if the histogram (not the mapping + # but the histogram) has a count larger than zero, if not, scale + # (the histogram scale) would be zero. 
See exponential.go 191 + self._downscale( + self._previous_scale - min_scale, + self._previous_positive, + self._previous_negative, + ) + self._merge( + self._previous_positive, + current_value_positive, + scale, + min_scale, + collection_aggregation_temporality, + ) + self._merge( + self._previous_negative, + current_value_negative, + scale, + min_scale, + collection_aggregation_temporality, + ) + + self._previous_scale = min_scale + + self._previous_cumulative_value_positive = ( + current_value_positive + ) + self._previous_cumulative_value_negative = ( + current_value_negative + ) + self._previous_min = min(min_, self._previous_min) + self._previous_max = max(max_, self._previous_max) + self._previous_sum = sum_ + self._previous_sum + self._previous_count = count + self._previous_count + self._previous_zero_count = ( + zero_count + self._previous_zero_count + ) + + return ExponentialHistogramDataPoint( + attributes=self._attributes, + start_time_unix_nano=self._start_time_unix_nano, + time_unix_nano=collection_start_nano, + count=self._previous_count, + sum=self._previous_sum, + scale=scale, + zero_count=self._previous_zero_count, + positive=BucketsPoint( + offset=current_value_positive.offset, + bucket_counts=current_value_positive.get_offset_counts(), + ), + negative=BucketsPoint( + offset=current_value_negative.offset, + bucket_counts=current_value_negative.get_offset_counts(), + ), + # FIXME: Find the right value for flags + flags=0, + min=min_, + max=max_, + ) + + """ + self._previous_cumulative_value = [ + current_value_element + previous_cumulative_value_element + for ( + current_value_element, + previous_cumulative_value_element, + ) in zip(current_value, self._previous_cumulative_value) + ] + self._previous_min = min(min_, self._previous_min) + self._previous_max = max(max_, self._previous_max) + self._previous_sum = sum_ + self._previous_sum + + return HistogramDataPoint( + attributes=self._attributes, + start_time_unix_nano=self._start_time_unix_nano, 
+ time_unix_nano=collection_start_nano, + count=sum(self._previous_cumulative_value), + sum=self._previous_sum, + bucket_counts=tuple(self._previous_cumulative_value), + explicit_bounds=self._boundaries, + min=self._previous_min, + max=self._previous_max, + ) + """ + + return None + with self._lock: if self._count == 0: return None @@ -927,6 +1058,9 @@ def collect( current_negative = self._previous_negative else: + # We are currently not considering the case where an + # asyncrhonous instrument uses an exponential bucket + # aggregation. start_time_unix_nano = self._previous_start_time_unix_nano sum_ = current_sum - self._previous_sum zero_count = current_zero_count From ed078bbe83869929a197ba403b28b602e949cd5f Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 18 Jun 2024 16:35:20 -0600 Subject: [PATCH 05/35] Remove old code --- .../sdk/metrics/_internal/aggregation.py | 246 +----------------- 1 file changed, 13 insertions(+), 233 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 9ba62068f8e..1a9cb3257cc 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -619,14 +619,15 @@ def aggregate(self, measurement: Measurement) -> None: value = measurement.value + if self._current_value_positive is None: + self._current_value_positive = Buckets() + if self._current_value_negative is None: + self._current_value_negative = Buckets() + if value > 0: - if self._current_value_positive is None: - self._current_value_positive = Buckets() current_value = self._current_value_positive else: - if self._current_value_negative is None: - self._current_value_negative = Buckets() value = -value current_value = self._current_value_negative @@ -675,8 +676,8 @@ def aggregate(self, measurement: Measurement) -> None: scale_change = 
self._get_scale_change(low, high) self._downscale( scale_change, - self._positive, - self._negative, + self._current_value_positive, + self._current_value_negative, ) new_scale = self._mapping.scale - scale_change self._mapping = _new_exponential_mapping(new_scale) @@ -811,7 +812,7 @@ def collect( low_positive, high_positive = ( self._get_low_high_previous_current( - self._previous_positive, + self._previous_cumulative_value_positive, current_value_positive, scale, min_scale, @@ -819,7 +820,7 @@ def collect( ) low_negative, high_negative = ( self._get_low_high_previous_current( - self._previous_negative, + self._previous_cumulative_value_negative, current_value_negative, scale, min_scale, @@ -838,18 +839,18 @@ def collect( # (the histogram scale) would be zero. See exponential.go 191 self._downscale( self._previous_scale - min_scale, - self._previous_positive, - self._previous_negative, + self._previous_cumulative_value_positive, + self._previous_cumulative_value_negative, ) self._merge( - self._previous_positive, + self._previous_cumulative_value_positive, current_value_positive, scale, min_scale, collection_aggregation_temporality, ) self._merge( - self._previous_negative, + self._previous_cumulative_value_negative, current_value_negative, scale, min_scale, @@ -894,229 +895,8 @@ def collect( max=max_, ) - """ - self._previous_cumulative_value = [ - current_value_element + previous_cumulative_value_element - for ( - current_value_element, - previous_cumulative_value_element, - ) in zip(current_value, self._previous_cumulative_value) - ] - self._previous_min = min(min_, self._previous_min) - self._previous_max = max(max_, self._previous_max) - self._previous_sum = sum_ + self._previous_sum - - return HistogramDataPoint( - attributes=self._attributes, - start_time_unix_nano=self._start_time_unix_nano, - time_unix_nano=collection_start_nano, - count=sum(self._previous_cumulative_value), - sum=self._previous_sum, - bucket_counts=tuple(self._previous_cumulative_value), 
- explicit_bounds=self._boundaries, - min=self._previous_min, - max=self._previous_max, - ) - """ - return None - with self._lock: - if self._count == 0: - return None - - current_negative = self._negative - current_positive = self._positive - current_zero_count = self._zero_count - current_count = self._count - current_start_time_unix_nano = self._start_time_unix_nano - current_sum = self._sum - current_max = self._max - if current_max == -inf: - current_max = None - current_min = self._min - if current_min == inf: - current_min = None - - if self._count == self._zero_count: - current_scale = 0 - - else: - current_scale = self._mapping.scale - - self._negative = Buckets() - self._positive = Buckets() - self._start_time_unix_nano = collection_start_nano - self._sum = 0 - self._count = 0 - self._zero_count = 0 - self._min = inf - self._max = -inf - - if self._previous_scale is None or ( - self._instrument_aggregation_temporality - is collection_aggregation_temporality - ): - self._previous_scale = current_scale - self._previous_start_time_unix_nano = ( - current_start_time_unix_nano - ) - self._previous_max = current_max - self._previous_min = current_min - self._previous_sum = current_sum - self._previous_count = current_count - self._previous_zero_count = current_zero_count - self._previous_positive = current_positive - self._previous_negative = current_negative - - current_point = ExponentialHistogramDataPoint( - attributes=self._attributes, - start_time_unix_nano=current_start_time_unix_nano, - time_unix_nano=collection_start_nano, - count=current_count, - sum=current_sum, - scale=current_scale, - zero_count=current_zero_count, - positive=BucketsPoint( - offset=current_positive.offset, - bucket_counts=current_positive.get_offset_counts(), - ), - negative=BucketsPoint( - offset=current_negative.offset, - bucket_counts=current_negative.get_offset_counts(), - ), - # FIXME: Find the right value for flags - flags=0, - min=current_min, - max=current_max, - ) - - 
return current_point - - min_scale = min(self._previous_scale, current_scale) - - low_positive, high_positive = self._get_low_high_previous_current( - self._previous_positive, - current_positive, - current_scale, - min_scale, - ) - low_negative, high_negative = self._get_low_high_previous_current( - self._previous_negative, - current_negative, - current_scale, - min_scale, - ) - - min_scale = min( - min_scale - - self._get_scale_change(low_positive, high_positive), - min_scale - - self._get_scale_change(low_negative, high_negative), - ) - - # FIXME Go implementation checks if the histogram (not the mapping - # but the histogram) has a count larger than zero, if not, scale - # (the histogram scale) would be zero. See exponential.go 191 - self._downscale( - self._previous_scale - min_scale, - self._previous_positive, - self._previous_negative, - ) - self._previous_scale = min_scale - - if ( - collection_aggregation_temporality - is AggregationTemporality.CUMULATIVE - ): - - start_time_unix_nano = self._previous_start_time_unix_nano - sum_ = current_sum + self._previous_sum - zero_count = current_zero_count + self._previous_zero_count - count = current_count + self._previous_count - # Only update min/max on delta -> cumulative - max_ = max(current_max, self._previous_max) - min_ = min(current_min, self._previous_min) - - self._merge( - self._previous_positive, - current_positive, - current_scale, - min_scale, - collection_aggregation_temporality, - ) - self._merge( - self._previous_negative, - current_negative, - current_scale, - min_scale, - collection_aggregation_temporality, - ) - current_scale = min_scale - - current_positive = self._previous_positive - current_negative = self._previous_negative - - else: - # We are currently not considering the case where an - # asyncrhonous instrument uses an exponential bucket - # aggregation. 
- start_time_unix_nano = self._previous_start_time_unix_nano - sum_ = current_sum - self._previous_sum - zero_count = current_zero_count - count = current_count - max_ = current_max - min_ = current_min - - self._merge( - self._previous_positive, - current_positive, - current_scale, - min_scale, - collection_aggregation_temporality, - ) - self._merge( - self._previous_negative, - current_negative, - current_scale, - min_scale, - collection_aggregation_temporality, - ) - - current_point = ExponentialHistogramDataPoint( - attributes=self._attributes, - start_time_unix_nano=start_time_unix_nano, - time_unix_nano=collection_start_nano, - count=count, - sum=sum_, - scale=current_scale, - zero_count=zero_count, - positive=BucketsPoint( - offset=current_positive.offset, - bucket_counts=current_positive.get_offset_counts(), - ), - negative=BucketsPoint( - offset=current_negative.offset, - bucket_counts=current_negative.get_offset_counts(), - ), - # FIXME: Find the right value for flags - flags=0, - min=min_, - max=max_, - ) - - self._previous_scale = current_scale - self._previous_positive = current_positive - self._previous_negative = current_negative - self._previous_start_time_unix_nano = current_start_time_unix_nano - self._previous_sum = sum_ - self._previous_count = count - self._previous_max = max_ - self._previous_min = min_ - self._previous_zero_count = zero_count - - return current_point - def _get_low_high_previous_current( self, previous_point_buckets, From f8f5911f93819b0c7657c00b5f0f032dc2b1e93e Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 18 Jun 2024 16:58:29 -0600 Subject: [PATCH 06/35] Fix some test cases --- .../sdk/metrics/_internal/aggregation.py | 12 +++++++++++- .../test_exponential_bucket_histogram_aggregation.py | 11 ++++++++--- 2 files changed, 19 insertions(+), 4 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py 
index 1a9cb3257cc..9499ec8267c 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -578,7 +578,7 @@ def __init__( _logger.warning( "max_scale is set to %s which is " "larger than the recommended value of 20", - self._max_scale, + max_scale, ) super().__init__(attributes) @@ -1140,8 +1140,18 @@ def _create_aggregation( attributes: Attributes, start_time_unix_nano: int, ) -> _Aggregation: + + instrument_aggregation_temporality = AggregationTemporality.UNSPECIFIED + if isinstance(instrument, Synchronous): + instrument_aggregation_temporality = AggregationTemporality.DELTA + elif isinstance(instrument, Asynchronous): + instrument_aggregation_temporality = ( + AggregationTemporality.CUMULATIVE + ) + return _ExponentialBucketHistogramAggregation( attributes, + instrument_aggregation_temporality, start_time_unix_nano, max_size=self._max_size, max_scale=self._max_scale, diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index cabd7b24509..bd2bf8f1854 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -456,6 +456,10 @@ def mock_increment(self, bucket_index: int) -> None: self.assertEqual(0, exponential_histogram_aggregation._sum) expect = 0 + exponential_histogram_aggregation._current_value_positive = ( + Buckets() + ) + for value in range(2, 257): expect += value * increment with patch.object( @@ -810,6 +814,8 @@ def mock_increment(self, bucket_index: int) -> None: self._counts[bucket_index] += increment + exponential_histogram_aggregation_1._current_value_positive = Buckets() + with patch.object( 
exponential_histogram_aggregation_1._current_value_positive, "increment_bucket", @@ -850,9 +856,6 @@ def test_one_count_by_increment(self): ) ) - # positive_mock = Mock( - # wraps=exponential_histogram_aggregation_1._current_value_positive - # ) def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket @@ -860,6 +863,8 @@ def mock_increment(self, bucket_index: int) -> None: self._counts[bucket_index] += increment + exponential_histogram_aggregation_1._current_value_positive = Buckets() + with patch.object( exponential_histogram_aggregation_1._current_value_positive, "increment_bucket", From cc765babbb701e587d27ef218a59d1ca420a5a0a Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 18 Jun 2024 17:36:41 -0600 Subject: [PATCH 07/35] Relocate method --- .../sdk/metrics/_internal/aggregation.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 9499ec8267c..bdae6d46b92 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -522,12 +522,6 @@ def collect( return None -def _new_exponential_mapping(scale: int) -> Mapping: - if scale <= 0: - return ExponentMapping(scale) - return LogarithmMapping(scale) - - # pylint: disable=protected-access class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]): # _min_max_size and _max_max_size are the smallest and largest values @@ -608,7 +602,7 @@ def __init__( self._previous_count = 0 self._previous_zero_count = 0 - self._mapping = _new_exponential_mapping(self._max_scale) + self._mapping = self._new_exponential_mapping(self._max_scale) self._previous_scale = None @@ -680,7 +674,7 @@ def aggregate(self, measurement: Measurement) -> None: self._current_value_negative, ) new_scale = self._mapping.scale 
- scale_change - self._mapping = _new_exponential_mapping(new_scale) + self._mapping = self._new_exponential_mapping(new_scale) index = self._mapping.map_to_index(value) @@ -935,6 +929,12 @@ def _get_low_high(buckets, scale, min_scale): return buckets.index_start >> shift, buckets.index_end >> shift + @staticmethod + def _new_exponential_mapping(scale: int) -> Mapping: + if scale <= 0: + return ExponentMapping(scale) + return LogarithmMapping(scale) + def _get_scale_change(self, low, high): change = 0 From 1e69dce77fb0dcc99c0fa6cb9fe4ba49cdf49986 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 18 Jun 2024 18:03:32 -0600 Subject: [PATCH 08/35] Debugging test_aggregate_collect --- .../opentelemetry/sdk/metrics/_internal/aggregation.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index bdae6d46b92..5eb008d92f4 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -804,6 +804,9 @@ def collect( min_scale = min(self._previous_scale, scale) + from ipdb import set_trace + set_trace() + low_positive, high_positive = ( self._get_low_high_previous_current( self._previous_cumulative_value_positive, @@ -812,6 +815,9 @@ def collect( min_scale, ) ) + # running test_aggregate_collect in 3.11 + # low_positive == 1048575 + # high_positive == 1048575 low_negative, high_negative = ( self._get_low_high_previous_current( self._previous_cumulative_value_negative, @@ -820,6 +826,8 @@ def collect( min_scale, ) ) + # low_negative == 0 + # high_negative == -1 min_scale = min( min_scale From 3acf866a84093ec0c02f7ba2b92d8cac786cabe4 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 18 Jun 2024 19:02:26 -0600 Subject: [PATCH 09/35] Fix empty previous bucket handling --- .../sdk/metrics/_internal/aggregation.py 
| 13 +++++++++++-- .../_internal/exponential_histogram/buckets.py | 10 ++++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 5eb008d92f4..2467c9f734c 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -594,8 +594,8 @@ def __init__( self._current_value_negative = None self._previous_collection_start_nano = self._start_time_unix_nano - self._previous_cumulative_value_positive = Buckets() - self._previous_cumulative_value_negative = Buckets() + self._previous_cumulative_value_positive = None + self._previous_cumulative_value_negative = None self._previous_min = inf self._previous_max = -inf self._previous_sum = 0 @@ -802,6 +802,15 @@ def collect( if self._previous_scale is None: self._previous_scale = scale + if self._previous_cumulative_value_positive is None: + self._previous_cumulative_value_positive = ( + current_value_positive.copy_empty() + ) + if self._previous_cumulative_value_negative is None: + self._previous_cumulative_value_negative = ( + current_value_negative.copy_empty() + ) + min_scale = min(self._previous_scale, scale) from ipdb import set_trace diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py index 4dbe8f385e8..ee0060a986f 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py @@ -177,3 +177,13 @@ def downscale(self, amount: int) -> None: def increment_bucket(self, bucket_index: int, increment: int = 1) -> None: self._counts[bucket_index] += increment + + def copy_empty(self) 
-> "Buckets": + copy = Buckets() + + copy._Buckets__index_base = self._Buckets__index_base + copy._Buckets__index_start = self._Buckets__index_start + copy._Buckets__index_end = self._Buckets__index_end + copy._counts = [0 for _ in self._counts] + + return copy From 1582ebf6a5cecd22f2eab14bcab4ccdb9de1ad86 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 11:53:52 -0600 Subject: [PATCH 10/35] Fix max and min setting --- .../sdk/metrics/_internal/aggregation.py | 25 ++++++++----------- ...xponential_bucket_histogram_aggregation.py | 5 ++-- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 2467c9f734c..e8a4d319a80 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -618,7 +618,17 @@ def aggregate(self, measurement: Measurement) -> None: if self._current_value_negative is None: self._current_value_negative = Buckets() - if value > 0: + if value < self._min: + self._min = value + + if value > self._max: + self._max = value + + if value == 0: + self._zero_count += 1 + return + + elif value > 0: current_value = self._current_value_positive else: @@ -627,18 +637,8 @@ def aggregate(self, measurement: Measurement) -> None: self._sum += value - if value < self._min: - self._min = value - - elif value > self._max: - self._max = value - self._count += 1 - if value == 0: - self._zero_count += 1 - return - index = self._mapping.map_to_index(value) is_rescaling_needed = False @@ -813,9 +813,6 @@ def collect( min_scale = min(self._previous_scale, scale) - from ipdb import set_trace - set_trace() - low_positive, high_positive = ( self._get_low_high_previous_current( self._previous_cumulative_value_positive, diff --git 
a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index bd2bf8f1854..0b1a75e2b28 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -1130,11 +1130,13 @@ def test_merge_collect_cumulative(self): [1, 1, 1, 1] ) - result = exponential_histogram_aggregation.collect( + result_0 = exponential_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 0, ) + self.assertEqual(result_0.scale, 0) + for value in [1, 2, 4, 8]: exponential_histogram_aggregation.aggregate( Measurement(1 / value, Mock()) @@ -1155,7 +1157,6 @@ def test_merge_collect_cumulative(self): 0, ) - self.assertEqual(result.scale, 0) self.assertEqual(result_1.scale, -1) def test_merge_collect_delta(self): From 09173fa4139e0e0f2a33aa12be6eb947f1b26ac1 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 14:39:18 -0600 Subject: [PATCH 11/35] Fix explicit bucket aggregation to make it consistent --- .../sdk/metrics/_internal/aggregation.py | 41 +++++++++++++++---- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index e8a4d319a80..b79fea711d7 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -417,7 +417,7 @@ def __init__( self._current_value = None self._previous_collection_start_nano = self._start_time_unix_nano - self._previous_cumulative_value = self._get_empty_bucket_counts() + self._previous_cumulative_value = None self._previous_min = inf self._previous_max = -inf 
self._previous_sum = 0 @@ -496,6 +496,11 @@ def collect( if current_value is None: current_value = self._get_empty_bucket_counts() + if self._previous_cumulative_value is None: + self._previous_cumulative_value = ( + self._get_empty_bucket_counts() + ) + self._previous_cumulative_value = [ current_value_element + previous_cumulative_value_element for ( @@ -668,6 +673,9 @@ def aggregate(self, measurement: Measurement) -> None: if is_rescaling_needed: scale_change = self._get_scale_change(low, high) + # _downscale changes the buckets. This is the main difference + # with the _ExplicitBucketHistogramAggregation, as values are + # added to the histogram, the buckets can change in size. self._downscale( scale_change, self._current_value_positive, @@ -709,6 +717,7 @@ def collect( """ Atomically return a point for the current value of the metric. """ + # pylint: disable=too-many-statements, too-many-locals with self._lock: current_value_positive = self._current_value_positive @@ -796,13 +805,32 @@ def collect( # TODO, implement this case if current_value_positive is None: + # This happens if collect is called before aggregate is + # called or if collect is called twice with no calls to + # aggregate in between. current_value_positive = Buckets() if current_value_negative is None: current_value_negative = Buckets() - if self._previous_scale is None: - self._previous_scale = scale if self._previous_cumulative_value_positive is None: + # This happens when collect is called the very first time. + + # We need previous buckets to add them to the current ones. + # When collect is called for the first time, there are no + # previous buckets, so we need to create empty buckets to + # add them to the current ones. The addition of empty + # buckets to the current ones will result in the current + # ones unchanged. 
+ + # The way the previous buckets are generated here is + # different from the explicit bucket histogram where + # the size and amount of the buckets does not change once + # they are instantiated. Here, the size and amount of the + # buckets can change with every call to aggregate. In order + # to get empty buckets that can be added to the current + # ones resulting in the current ones unchanged we need to + # generate empty buckets that have the same size and amount + # as the current ones, this is what copy_empty does. self._previous_cumulative_value_positive = ( current_value_positive.copy_empty() ) @@ -810,6 +838,8 @@ def collect( self._previous_cumulative_value_negative = ( current_value_negative.copy_empty() ) + if self._previous_scale is None: + self._previous_scale = scale min_scale = min(self._previous_scale, scale) @@ -821,9 +851,6 @@ def collect( min_scale, ) ) - # running test_aggregate_collect in 3.11 - # low_positive == 1048575 - # high_positive == 1048575 low_negative, high_negative = ( self._get_low_high_previous_current( self._previous_cumulative_value_negative, @@ -832,8 +859,6 @@ def collect( min_scale, ) ) - # low_negative == 0 - # high_negative == -1 min_scale = min( min_scale From ec15b16991c9d849334e536f632846381787b498 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 15:27:46 -0600 Subject: [PATCH 12/35] Rearrange __init__s --- .../sdk/metrics/_internal/aggregation.py | 60 ++++++++++--------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index b79fea711d7..a774ffe0d67 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -403,25 +403,25 @@ def __init__( ): super().__init__(attributes) - self._boundaries = tuple(boundaries) - self._record_min_max = 
record_min_max - self._min = inf - self._max = -inf - self._sum = 0 - - self._start_time_unix_nano = start_time_unix_nano self._instrument_aggregation_temporality = ( instrument_aggregation_temporality ) + self._start_time_unix_nano = start_time_unix_nano + self._boundaries = tuple(boundaries) + self._record_min_max = record_min_max self._current_value = None + self._min = inf + self._max = -inf + self._sum = 0 - self._previous_collection_start_nano = self._start_time_unix_nano self._previous_cumulative_value = None self._previous_min = inf self._previous_max = -inf self._previous_sum = 0 + self._previous_collection_start_nano = self._start_time_unix_nano + def _get_empty_bucket_counts(self) -> List[int]: return [0] * (len(self._boundaries) + 1) @@ -582,23 +582,22 @@ def __init__( super().__init__(attributes) + self._instrument_aggregation_temporality = ( + instrument_aggregation_temporality + ) + self._start_time_unix_nano = start_time_unix_nano self._max_size = max_size self._max_scale = max_scale + + self._current_value_positive = None + self._current_value_negative = None self._min = inf self._max = -inf self._sum = 0 self._count = 0 self._zero_count = 0 + self._scale = 0 - self._start_time_unix_nano = start_time_unix_nano - self._instrument_aggregation_temporality = ( - instrument_aggregation_temporality - ) - - self._current_value_positive = None - self._current_value_negative = None - - self._previous_collection_start_nano = self._start_time_unix_nano self._previous_cumulative_value_positive = None self._previous_cumulative_value_negative = None self._previous_min = inf @@ -606,10 +605,11 @@ def __init__( self._previous_sum = 0 self._previous_count = 0 self._previous_zero_count = 0 + self._previous_scale = 0 - self._mapping = self._new_exponential_mapping(self._max_scale) + self._previous_collection_start_nano = self._start_time_unix_nano - self._previous_scale = None + self._mapping = self._new_mapping(self._max_scale) def aggregate(self, measurement: 
Measurement) -> None: # pylint: disable=too-many-branches,too-many-statements, too-many-locals @@ -629,6 +629,8 @@ def aggregate(self, measurement: Measurement) -> None: if value > self._max: self._max = value + self._count += 1 + if value == 0: self._zero_count += 1 return @@ -642,8 +644,6 @@ def aggregate(self, measurement: Measurement) -> None: self._sum += value - self._count += 1 - index = self._mapping.map_to_index(value) is_rescaling_needed = False @@ -681,11 +681,17 @@ def aggregate(self, measurement: Measurement) -> None: self._current_value_positive, self._current_value_negative, ) - new_scale = self._mapping.scale - scale_change - self._mapping = self._new_exponential_mapping(new_scale) + self._mapping = self._new_mapping( + self._mapping.scale - scale_change + ) index = self._mapping.map_to_index(value) + if self._count == self._zero_count: + self._scale = 0 + else: + self._scale = self._mapping.scale + if index < current_value.index_start: span = current_value.index_end - index @@ -727,10 +733,7 @@ def collect( max_ = self._max count = self._count zero_count = self._zero_count - if self._count == self._zero_count: - scale = 0 - else: - scale = self._mapping.scale + scale = self._scale self._current_value_positive = None self._current_value_negative = None @@ -739,6 +742,7 @@ def collect( self._max = -inf self._count = 0 self._zero_count = 0 + self._scale = 0 if ( self._instrument_aggregation_temporality @@ -969,7 +973,7 @@ def _get_low_high(buckets, scale, min_scale): return buckets.index_start >> shift, buckets.index_end >> shift @staticmethod - def _new_exponential_mapping(scale: int) -> Mapping: + def _new_mapping(scale: int) -> Mapping: if scale <= 0: return ExponentMapping(scale) return LogarithmMapping(scale) From 5aa017b75110e806ec1ded22bd8b686742c47001 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 15:41:30 -0600 Subject: [PATCH 13/35] Set scale in aggregate --- .../sdk/metrics/_internal/aggregation.py | 13 +++++++------ 1 
file changed, 7 insertions(+), 6 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index a774ffe0d67..d14632a7d49 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -629,10 +629,16 @@ def aggregate(self, measurement: Measurement) -> None: if value > self._max: self._max = value + self._sum += value + self._count += 1 if value == 0: self._zero_count += 1 + + if self._count == self._zero_count: + self._scale = 0 + return elif value > 0: @@ -642,8 +648,6 @@ def aggregate(self, measurement: Measurement) -> None: value = -value current_value = self._current_value_negative - self._sum += value - index = self._mapping.map_to_index(value) is_rescaling_needed = False @@ -687,10 +691,7 @@ def aggregate(self, measurement: Measurement) -> None: index = self._mapping.map_to_index(value) - if self._count == self._zero_count: - self._scale = 0 - else: - self._scale = self._mapping.scale + self._scale = self._mapping.scale if index < current_value.index_start: span = current_value.index_end - index From cfc3ee35f79eab67d221b58ea16240e4e27b8887 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 15:53:56 -0600 Subject: [PATCH 14/35] Use right values in exponential point --- .../sdk/metrics/_internal/aggregation.py | 27 +++++++++++-------- 1 file changed, 16 insertions(+), 11 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index d14632a7d49..3597bcd8a06 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -843,8 +843,6 @@ def collect( self._previous_cumulative_value_negative = ( 
current_value_negative.copy_empty() ) - if self._previous_scale is None: - self._previous_scale = scale min_scale = min(self._previous_scale, scale) @@ -895,8 +893,6 @@ def collect( collection_aggregation_temporality, ) - self._previous_scale = min_scale - self._previous_cumulative_value_positive = ( current_value_positive ) @@ -910,6 +906,7 @@ def collect( self._previous_zero_count = ( zero_count + self._previous_zero_count ) + self._previous_scale = min_scale return ExponentialHistogramDataPoint( attributes=self._attributes, @@ -917,20 +914,28 @@ def collect( time_unix_nano=collection_start_nano, count=self._previous_count, sum=self._previous_sum, - scale=scale, + scale=self._previous_scale, zero_count=self._previous_zero_count, positive=BucketsPoint( - offset=current_value_positive.offset, - bucket_counts=current_value_positive.get_offset_counts(), + offset=self._previous_cumulative_value_positive.offset, + bucket_counts=( + self. + _previous_cumulative_value_positive. + get_offset_counts() + ), ), negative=BucketsPoint( - offset=current_value_negative.offset, - bucket_counts=current_value_negative.get_offset_counts(), + offset=self._previous_cumulative_value_negative.offset, + bucket_counts=( + self. + _previous_cumulative_value_negative. 
+ get_offset_counts() + ), ), # FIXME: Find the right value for flags flags=0, - min=min_, - max=max_, + min=self._previous_min, + max=self._previous_max, ) return None From 2d6f7d4d2f7d36ece4a0972880cf57c15e61889e Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 16:12:46 -0600 Subject: [PATCH 15/35] Set scale right --- .../src/opentelemetry/sdk/metrics/_internal/aggregation.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 3597bcd8a06..e98e4654e98 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -605,7 +605,7 @@ def __init__( self._previous_sum = 0 self._previous_count = 0 self._previous_zero_count = 0 - self._previous_scale = 0 + self._previous_scale = None self._previous_collection_start_nano = self._start_time_unix_nano @@ -843,6 +843,8 @@ def collect( self._previous_cumulative_value_negative = ( current_value_negative.copy_empty() ) + if self._previous_scale is None: + self._previous_scale = scale min_scale = min(self._previous_scale, scale) From 8dc95a51159a80789f82fa1bf24bbadaf27020ea Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 16:22:30 -0600 Subject: [PATCH 16/35] Start scale as None --- .../src/opentelemetry/sdk/metrics/_internal/aggregation.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index e98e4654e98..ee6eb60db7a 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -596,7 +596,7 @@ def __init__( self._sum = 0 self._count = 0 
self._zero_count = 0 - self._scale = 0 + self._scale = None self._previous_cumulative_value_positive = None self._previous_cumulative_value_negative = None @@ -743,7 +743,7 @@ def collect( self._max = -inf self._count = 0 self._zero_count = 0 - self._scale = 0 + self._scale = None if ( self._instrument_aggregation_temporality From 460a4d2f339b8c307e04f510dbbe7e2aaeba7a8e Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 17:05:27 -0600 Subject: [PATCH 17/35] Make test_collect_results_cumulative pass I am not sure these changes are right, I just wanted to find what would be the value that would make this test case pass. --- .../test_exponential_bucket_histogram_aggregation.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index 0b1a75e2b28..581011914c5 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -1046,11 +1046,11 @@ def test_collect_results_cumulative(self) -> None: *[0] * 36, 1, *[0] * 15, - 2, - *[0] * 15, 1, *[0] * 15, - 1, + 0, + *[0] * 15, + 0, *[0] * 15, 1, *[0] * 40, From a9d4f59632da6fa27f105ee2ac5551cbc0b66f18 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 17:18:36 -0600 Subject: [PATCH 18/35] Actually use random values --- ...t_exponential_bucket_histogram_aggregation.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index 581011914c5..4e553368d69 100644 --- 
a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -15,11 +15,12 @@ # pylint: disable=protected-access,too-many-lines,invalid-name # pylint: disable=consider-using-enumerate,no-self-use,too-many-public-methods -import random as insecure_random +from random import Random, randrange +from inspect import currentframe from itertools import permutations from logging import WARNING from math import ldexp -from sys import float_info +from sys import float_info, maxsize from types import MethodType from unittest.mock import Mock, patch @@ -1097,10 +1098,17 @@ def collect_and_validate() -> None: assert result.zero_count == len([v for v in values if v == 0]) assert scale >= 3 - random = insecure_random.Random("opentelemetry2") + seed = randrange(maxsize) + # This test case is executed with random values every time. In order to + # run this test case with the same values used in a previous execution, + # check the value printed by that previous execution of this test case + # and use the same value for the seed variable in the line below. 
+ # seed = 4539544373807492135 + print(f"seed for {currentframe().f_code.co_name} is {seed}") + values = [] for i in range(2000): - value = random.randint(0, 1000) + value = Random(seed).randint(0, 1000) values.append(value) histogram.aggregate(Measurement(value, Mock())) if i % 20 == 0: From 5d588c3649738acf1dcd98e56a8dd784076e3aaa Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 18:02:34 -0600 Subject: [PATCH 19/35] Add integration test for exponential histogram --- ...xponential_bucket_histogram_aggregation.py | 256 ++++++++++++++++++ 1 file changed, 256 insertions(+) create mode 100644 opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram_aggregation.py diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram_aggregation.py new file mode 100644 index 00000000000..eee304d17de --- /dev/null +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram_aggregation.py @@ -0,0 +1,256 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from platform import system +from unittest import TestCase + +from pytest import mark + +from opentelemetry.sdk.metrics import Histogram, MeterProvider +from opentelemetry.sdk.metrics.export import ( + AggregationTemporality, + InMemoryMetricReader, +) +from opentelemetry.sdk.metrics.view import ( + ExponentialBucketHistogramAggregation +) + + +class TestExponentialBucketHistogramAggregation(TestCase): + + test_values = [1, 6, 11, 26, 51, 76, 101, 251, 501, 751] + + @mark.skipif( + system() == "Windows", + reason=( + "Tests fail because Windows time_ns resolution is too low so " + "two different time measurements may end up having the exact same" + "value." + ), + ) + def test_synchronous_delta_temporality(self): + + aggregation = ExponentialBucketHistogramAggregation() + + reader = InMemoryMetricReader( + preferred_aggregation={Histogram: aggregation}, + preferred_temporality={Histogram: AggregationTemporality.DELTA}, + ) + + provider = MeterProvider(metric_readers=[reader]) + meter = provider.get_meter("name", "version") + + histogram = meter.create_histogram("histogram") + + results = [] + + for _ in range(10): + + results.append(reader.get_metrics_data()) + + for metrics_data in results: + self.assertIsNone(metrics_data) + + results = [] + + for test_value in self.test_values: + histogram.record(test_value) + results.append(reader.get_metrics_data()) + + metric_data = ( + results[0] + .resource_metrics[0] + .scope_metrics[0] + .metrics[0] + .data.data_points[0] + ) + + previous_time_unix_nano = metric_data.time_unix_nano + + """ + self.assertEqual( + metric_data.bucket_counts, + (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + ) + """ + + self.assertLess( + metric_data.start_time_unix_nano, + previous_time_unix_nano, + ) + self.assertEqual(metric_data.min, self.test_values[0]) + self.assertEqual(metric_data.max, self.test_values[0]) + self.assertEqual(metric_data.sum, self.test_values[0]) + + for index, metrics_data in enumerate(results[1:]): + 
metric_data = ( + metrics_data.resource_metrics[0] + .scope_metrics[0] + .metrics[0] + .data.data_points[0] + ) + + self.assertEqual( + previous_time_unix_nano, metric_data.start_time_unix_nano + ) + previous_time_unix_nano = metric_data.time_unix_nano + """ + self.assertEqual( + metric_data.bucket_counts, + tuple( + [ + 1 if internal_index == index + 2 else 0 + for internal_index in range(16) + ] + ), + ) + """ + self.assertLess( + metric_data.start_time_unix_nano, metric_data.time_unix_nano + ) + self.assertEqual(metric_data.min, self.test_values[index + 1]) + self.assertEqual(metric_data.max, self.test_values[index + 1]) + self.assertEqual(metric_data.sum, self.test_values[index + 1]) + + results = [] + + for _ in range(10): + + results.append(reader.get_metrics_data()) + + provider.shutdown() + + for metrics_data in results: + self.assertIsNone(metrics_data) + + @mark.skipif( + system() == "Windows", + reason=( + "Tests fail because Windows time_ns resolution is too low so " + "two different time measurements may end up having the exact same" + "value." 
+ ), + ) + def test_synchronous_cumulative_temporality(self): + + aggregation = ExponentialBucketHistogramAggregation() + + reader = InMemoryMetricReader( + preferred_aggregation={Histogram: aggregation}, + preferred_temporality={ + Histogram: AggregationTemporality.CUMULATIVE + }, + ) + + provider = MeterProvider(metric_readers=[reader]) + meter = provider.get_meter("name", "version") + + histogram = meter.create_histogram("histogram") + + results = [] + + for _ in range(10): + + results.append(reader.get_metrics_data()) + + for metrics_data in results: + self.assertIsNone(metrics_data) + + results = [] + + for test_value in self.test_values: + + histogram.record(test_value) + results.append(reader.get_metrics_data()) + + start_time_unix_nano = ( + results[0] + .resource_metrics[0] + .scope_metrics[0] + .metrics[0] + .data.data_points[0] + .start_time_unix_nano + ) + + for index, metrics_data in enumerate(results): + + metric_data = ( + metrics_data.resource_metrics[0] + .scope_metrics[0] + .metrics[0] + .data.data_points[0] + ) + + self.assertEqual( + start_time_unix_nano, metric_data.start_time_unix_nano + ) + """ + self.assertEqual( + metric_data.bucket_counts, + tuple( + [ + ( + 0 + if internal_index < 1 or internal_index > index + 1 + else 1 + ) + for internal_index in range(16) + ] + ), + ) + """ + self.assertEqual(metric_data.min, self.test_values[0]) + self.assertEqual(metric_data.max, self.test_values[index]) + self.assertEqual( + metric_data.sum, sum(self.test_values[: index + 1]) + ) + + results = [] + + for i in range(10): + + results.append(reader.get_metrics_data()) + + provider.shutdown() + + start_time_unix_nano = ( + results[0] + .resource_metrics[0] + .scope_metrics[0] + .metrics[0] + .data.data_points[0] + .start_time_unix_nano + ) + + for metrics_data in results: + + metric_data = ( + metrics_data.resource_metrics[0] + .scope_metrics[0] + .metrics[0] + .data.data_points[0] + ) + + self.assertEqual( + start_time_unix_nano, 
metric_data.start_time_unix_nano + ) + """ + self.assertEqual( + metric_data.bucket_counts, + (0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0), + ) + """ + self.assertEqual(metric_data.min, self.test_values[0]) + self.assertEqual(metric_data.max, self.test_values[-1]) + self.assertEqual(metric_data.sum, sum(self.test_values)) From e4e144bcc17fb544e4b18f887494c067a7a7fff2 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 19 Jun 2024 18:16:00 -0600 Subject: [PATCH 20/35] Handle all cases for current and previous buckets and scale --- .../sdk/metrics/_internal/aggregation.py | 56 ++++++++++++++++--- 1 file changed, 47 insertions(+), 9 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index ee6eb60db7a..f094ceb5214 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -809,16 +809,34 @@ def collect( # TODO, implement this case - if current_value_positive is None: - # This happens if collect is called before aggregate is - # called or if collect is called twice with no calls to - # aggregate in between. + if ( + current_value_positive is None and + self._previous_cumulative_value_positive is None + ): + # This happens if collect is called for the first time + # and aggregate has not yet been called. 
current_value_positive = Buckets() - if current_value_negative is None: + self._previous_cumulative_value_positive = ( + current_value_positive.copy_empty() + ) + if ( + current_value_negative is None and + self._previous_cumulative_value_negative is None + ): current_value_negative = Buckets() + self._previous_cumulative_value_negative = ( + current_value_negative.copy_empty() + ) + if scale is None and self._previous_scale is None: + scale = self._mapping.scale + self._previous_scale = scale - if self._previous_cumulative_value_positive is None: - # This happens when collect is called the very first time. + if ( + current_value_positive is not None and + self._previous_cumulative_value_positive is None + ): + # This happens when collect is called the very first time + # and aggregate has been called before. # We need previous buckets to add them to the current ones. # When collect is called for the first time, there are no @@ -839,13 +857,33 @@ def collect( self._previous_cumulative_value_positive = ( current_value_positive.copy_empty() ) - if self._previous_cumulative_value_negative is None: + if ( + current_value_negative is not None and + self._previous_cumulative_value_negative is None + ): self._previous_cumulative_value_negative = ( current_value_negative.copy_empty() ) - if self._previous_scale is None: + if scale is not None and self._previous_scale is None: self._previous_scale = scale + if ( + current_value_positive is None and + self._previous_cumulative_value_positive is not None + ): + current_value_positive = ( + self._previous_cumulative_value_positive.copy_empty() + ) + if ( + current_value_negative is None and + self._previous_cumulative_value_negative is not None + ): + current_value_negative = ( + self._previous_cumulative_value_negative.copy_empty() + ) + if scale is None and self._previous_scale is not None: + scale = self._previous_scale + min_scale = min(self._previous_scale, scale) low_positive, high_positive = ( From 
17e0881e41279c5b8598bea318e5826467a89a1c Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 24 Jun 2024 11:26:00 -0600 Subject: [PATCH 21/35] Rename test module --- ...togram_aggregation.py => test_exponential_bucket_histogram.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename opentelemetry-sdk/tests/metrics/integration_test/{test_exponential_bucket_histogram_aggregation.py => test_exponential_bucket_histogram.py} (100%) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py similarity index 100% rename from opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram_aggregation.py rename to opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py From 7a707304d39e599ef777bd1fa48288cd5472a7d4 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 24 Jun 2024 15:58:45 -0600 Subject: [PATCH 22/35] Use random values --- .../test_exponential_bucket_histogram_aggregation.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index 4e553368d69..92e5a404fa7 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -1068,7 +1068,7 @@ def test_cumulative_aggregation_with_random_data(self) -> None: Mock(), ) - def collect_and_validate() -> None: + def collect_and_validate(values, histogram) -> None: result: ExponentialHistogramDataPoint = histogram.collect( AggregationTemporality.CUMULATIVE, 0 ) @@ -1103,18 +1103,20 @@ def collect_and_validate() -> None: # run this 
test case with the same values used in a previous execution, # check the value printed by that previous execution of this test case # and use the same value for the seed variable in the line below. - # seed = 4539544373807492135 + seed = 4539544373807492135 + + random_generator = Random(seed) print(f"seed for {currentframe().f_code.co_name} is {seed}") values = [] for i in range(2000): - value = Random(seed).randint(0, 1000) + value = random_generator.randint(0, 1000) values.append(value) histogram.aggregate(Measurement(value, Mock())) if i % 20 == 0: - collect_and_validate() + collect_and_validate(values, histogram) - collect_and_validate() + collect_and_validate(values, histogram) def test_merge_collect_cumulative(self): exponential_histogram_aggregation = ( From 8455bb0b63e5badea21a2d5566a354c4cf5257e4 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 24 Jun 2024 16:40:59 -0600 Subject: [PATCH 23/35] Fix bucket setting --- .../src/opentelemetry/sdk/metrics/_internal/aggregation.py | 6 ------ .../test_exponential_bucket_histogram_aggregation.py | 6 +++--- 2 files changed, 3 insertions(+), 9 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index f094ceb5214..729fbedc7cd 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -933,12 +933,6 @@ def collect( collection_aggregation_temporality, ) - self._previous_cumulative_value_positive = ( - current_value_positive - ) - self._previous_cumulative_value_negative = ( - current_value_negative - ) self._previous_min = min(min_, self._previous_min) self._previous_max = max(max_, self._previous_max) self._previous_sum = sum_ + self._previous_sum diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py 
b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index 92e5a404fa7..e950213d379 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -1047,11 +1047,11 @@ def test_collect_results_cumulative(self) -> None: *[0] * 36, 1, *[0] * 15, - 1, + 2, *[0] * 15, - 0, + 1, *[0] * 15, - 0, + 1, *[0] * 15, 1, *[0] * 40, From 0adc8e0d995663c23e1dd5b0462971ef85e42fa6 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 24 Jun 2024 17:43:23 -0600 Subject: [PATCH 24/35] WIP integration test --- .../test_exponential_bucket_histogram.py | 130 +++++++++++------- 1 file changed, 78 insertions(+), 52 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py index eee304d17de..11bdef29f89 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py @@ -29,8 +29,6 @@ class TestExponentialBucketHistogramAggregation(TestCase): - test_values = [1, 6, 11, 26, 51, 76, 101, 251, 501, 751] - @mark.skipif( system() == "Windows", reason=( @@ -41,6 +39,8 @@ class TestExponentialBucketHistogramAggregation(TestCase): ) def test_synchronous_delta_temporality(self): + test_values = [1, 6, 11, 26, 51, 76, 101, 251, 501, 751] + aggregation = ExponentialBucketHistogramAggregation() reader = InMemoryMetricReader( @@ -64,7 +64,7 @@ def test_synchronous_delta_temporality(self): results = [] - for test_value in self.test_values: + for test_value in test_values: histogram.record(test_value) results.append(reader.get_metrics_data()) @@ -78,20 +78,22 @@ def test_synchronous_delta_temporality(self): 
previous_time_unix_nano = metric_data.time_unix_nano - """ self.assertEqual( - metric_data.bucket_counts, - (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), + metric_data.positive.bucket_counts, + [1] + ) + self.assertEqual( + metric_data.negative.bucket_counts, + [0] ) - """ self.assertLess( metric_data.start_time_unix_nano, previous_time_unix_nano, ) - self.assertEqual(metric_data.min, self.test_values[0]) - self.assertEqual(metric_data.max, self.test_values[0]) - self.assertEqual(metric_data.sum, self.test_values[0]) + self.assertEqual(metric_data.min, test_values[0]) + self.assertEqual(metric_data.max, test_values[0]) + self.assertEqual(metric_data.sum, test_values[0]) for index, metrics_data in enumerate(results[1:]): metric_data = ( @@ -105,23 +107,20 @@ def test_synchronous_delta_temporality(self): previous_time_unix_nano, metric_data.start_time_unix_nano ) previous_time_unix_nano = metric_data.time_unix_nano - """ self.assertEqual( - metric_data.bucket_counts, - tuple( - [ - 1 if internal_index == index + 2 else 0 - for internal_index in range(16) - ] - ), + metric_data.positive.bucket_counts, + [1] + ) + self.assertEqual( + metric_data.negative.bucket_counts, + [0] ) - """ self.assertLess( metric_data.start_time_unix_nano, metric_data.time_unix_nano ) - self.assertEqual(metric_data.min, self.test_values[index + 1]) - self.assertEqual(metric_data.max, self.test_values[index + 1]) - self.assertEqual(metric_data.sum, self.test_values[index + 1]) + self.assertEqual(metric_data.min, test_values[index + 1]) + self.assertEqual(metric_data.max, test_values[index + 1]) + self.assertEqual(metric_data.sum, test_values[index + 1]) results = [] @@ -144,6 +143,8 @@ def test_synchronous_delta_temporality(self): ) def test_synchronous_cumulative_temporality(self): + test_values = [2, 4, 1, 1, 8, 0.5, 0.1, 0.045] + aggregation = ExponentialBucketHistogramAggregation() reader = InMemoryMetricReader( @@ -169,7 +170,7 @@ def test_synchronous_cumulative_temporality(self): 
results = [] - for test_value in self.test_values: + for test_value in test_values: histogram.record(test_value) results.append(reader.get_metrics_data()) @@ -195,26 +196,33 @@ def test_synchronous_cumulative_temporality(self): self.assertEqual( start_time_unix_nano, metric_data.start_time_unix_nano ) - """ - self.assertEqual( - metric_data.bucket_counts, - tuple( - [ - ( - 0 - if internal_index < 1 or internal_index > index + 1 - else 1 - ) - for internal_index in range(16) - ] - ), - ) - """ - self.assertEqual(metric_data.min, self.test_values[0]) - self.assertEqual(metric_data.max, self.test_values[index]) - self.assertEqual( - metric_data.sum, sum(self.test_values[: index + 1]) - ) + self.assertEqual(metric_data.min, min(test_values[:index + 1])) + self.assertEqual(metric_data.max, max(test_values[:index + 1])) + self.assertEqual(metric_data.sum, sum(test_values[:index + 1])) + + self.assertEqual( + metric_data.positive.bucket_counts, + [ + 1, + *[0] * 17, + 1, + *[0] * 36, + 1, + *[0] * 15, + 2, + *[0] * 15, + 1, + *[0] * 15, + 1, + *[0] * 15, + 1, + *[0] * 40, + ], + ) + self.assertEqual( + metric_data.negative.bucket_counts, + [0] + ) results = [] @@ -245,12 +253,30 @@ def test_synchronous_cumulative_temporality(self): self.assertEqual( start_time_unix_nano, metric_data.start_time_unix_nano ) - """ - self.assertEqual( - metric_data.bucket_counts, - (0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0), - ) - """ - self.assertEqual(metric_data.min, self.test_values[0]) - self.assertEqual(metric_data.max, self.test_values[-1]) - self.assertEqual(metric_data.sum, sum(self.test_values)) + self.assertEqual(metric_data.min, min(test_values[:index + 1])) + self.assertEqual(metric_data.max, max(test_values[:index + 1])) + self.assertEqual(metric_data.sum, sum(test_values[:index + 1])) + + self.assertEqual( + metric_data.positive.bucket_counts, + [ + 1, + *[0] * 17, + 1, + *[0] * 36, + 1, + *[0] * 15, + 2, + *[0] * 15, + 1, + *[0] * 15, + 1, + *[0] * 15, + 1, + *[0] * 40, 
+ ], + ) + self.assertEqual( + metric_data.negative.bucket_counts, + [0] + ) From e33e46a97eb8f68618a39a77213c1b113bcd4e48 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 24 Jun 2024 17:54:05 -0600 Subject: [PATCH 25/35] WIP --- .../test_exponential_bucket_histogram.py | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py index 11bdef29f89..cbe1298c748 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py @@ -29,6 +29,8 @@ class TestExponentialBucketHistogramAggregation(TestCase): + test_values = [2, 4, 1, 1, 8, 0.5, 0.1, 0.045] + @mark.skipif( system() == "Windows", reason=( @@ -39,8 +41,6 @@ class TestExponentialBucketHistogramAggregation(TestCase): ) def test_synchronous_delta_temporality(self): - test_values = [1, 6, 11, 26, 51, 76, 101, 251, 501, 751] - aggregation = ExponentialBucketHistogramAggregation() reader = InMemoryMetricReader( @@ -64,7 +64,7 @@ def test_synchronous_delta_temporality(self): results = [] - for test_value in test_values: + for test_value in self.test_values: histogram.record(test_value) results.append(reader.get_metrics_data()) @@ -91,9 +91,9 @@ def test_synchronous_delta_temporality(self): metric_data.start_time_unix_nano, previous_time_unix_nano, ) - self.assertEqual(metric_data.min, test_values[0]) - self.assertEqual(metric_data.max, test_values[0]) - self.assertEqual(metric_data.sum, test_values[0]) + self.assertEqual(metric_data.min, self.test_values[0]) + self.assertEqual(metric_data.max, self.test_values[0]) + self.assertEqual(metric_data.sum, self.test_values[0]) for index, metrics_data in enumerate(results[1:]): metric_data = ( @@ -118,9 +118,9 @@ def 
test_synchronous_delta_temporality(self): self.assertLess( metric_data.start_time_unix_nano, metric_data.time_unix_nano ) - self.assertEqual(metric_data.min, test_values[index + 1]) - self.assertEqual(metric_data.max, test_values[index + 1]) - self.assertEqual(metric_data.sum, test_values[index + 1]) + self.assertEqual(metric_data.min, self.test_values[index + 1]) + self.assertEqual(metric_data.max, self.test_values[index + 1]) + self.assertEqual(metric_data.sum, self.test_values[index + 1]) results = [] @@ -143,7 +143,7 @@ def test_synchronous_delta_temporality(self): ) def test_synchronous_cumulative_temporality(self): - test_values = [2, 4, 1, 1, 8, 0.5, 0.1, 0.045] + self.test_values = [2, 4, 1, 1, 8, 0.5, 0.1, 0.045] aggregation = ExponentialBucketHistogramAggregation() @@ -170,7 +170,7 @@ def test_synchronous_cumulative_temporality(self): results = [] - for test_value in test_values: + for test_value in self.test_values: histogram.record(test_value) results.append(reader.get_metrics_data()) @@ -196,9 +196,9 @@ def test_synchronous_cumulative_temporality(self): self.assertEqual( start_time_unix_nano, metric_data.start_time_unix_nano ) - self.assertEqual(metric_data.min, min(test_values[:index + 1])) - self.assertEqual(metric_data.max, max(test_values[:index + 1])) - self.assertEqual(metric_data.sum, sum(test_values[:index + 1])) + self.assertEqual(metric_data.min, min(self.test_values[:index + 1])) + self.assertEqual(metric_data.max, max(self.test_values[:index + 1])) + self.assertEqual(metric_data.sum, sum(self.test_values[:index + 1])) self.assertEqual( metric_data.positive.bucket_counts, @@ -253,9 +253,9 @@ def test_synchronous_cumulative_temporality(self): self.assertEqual( start_time_unix_nano, metric_data.start_time_unix_nano ) - self.assertEqual(metric_data.min, min(test_values[:index + 1])) - self.assertEqual(metric_data.max, max(test_values[:index + 1])) - self.assertEqual(metric_data.sum, sum(test_values[:index + 1])) + 
self.assertEqual(metric_data.min, min(self.test_values[:index + 1])) + self.assertEqual(metric_data.max, max(self.test_values[:index + 1])) + self.assertEqual(metric_data.sum, sum(self.test_values[:index + 1])) self.assertEqual( metric_data.positive.bucket_counts, From 9ba466ef7aeb5b5371ddc19cb9d23cd4de7a8ea7 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 25 Jun 2024 16:42:44 -0600 Subject: [PATCH 26/35] Finish integration tests --- .../test_exponential_bucket_histogram.py | 129 ++++++++++++------ 1 file changed, 88 insertions(+), 41 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py index cbe1298c748..d6d6799fd94 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py @@ -143,8 +143,6 @@ def test_synchronous_delta_temporality(self): ) def test_synchronous_cumulative_temporality(self): - self.test_values = [2, 4, 1, 1, 8, 0.5, 0.1, 0.045] - aggregation = ExponentialBucketHistogramAggregation() reader = InMemoryMetricReader( @@ -162,7 +160,6 @@ def test_synchronous_cumulative_temporality(self): results = [] for _ in range(10): - results.append(reader.get_metrics_data()) for metrics_data in results: @@ -171,21 +168,30 @@ def test_synchronous_cumulative_temporality(self): results = [] for test_value in self.test_values: - histogram.record(test_value) results.append(reader.get_metrics_data()) - start_time_unix_nano = ( + metric_data = ( results[0] .resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points[0] - .start_time_unix_nano ) - for index, metrics_data in enumerate(results): + start_time_unix_nano = metric_data.start_time_unix_nano + self.assertLess( + metric_data.start_time_unix_nano, + metric_data.time_unix_nano, + ) + self.assertEqual(metric_data.min, 
self.test_values[0]) + self.assertEqual(metric_data.max, self.test_values[0]) + self.assertEqual(metric_data.sum, self.test_values[0]) + + previous_time_unix_nano = metric_data.time_unix_nano + + for index, metrics_data in enumerate(results[1:]): metric_data = ( metrics_data.resource_metrics[0] .scope_metrics[0] @@ -196,9 +202,26 @@ def test_synchronous_cumulative_temporality(self): self.assertEqual( start_time_unix_nano, metric_data.start_time_unix_nano ) - self.assertEqual(metric_data.min, min(self.test_values[:index + 1])) - self.assertEqual(metric_data.max, max(self.test_values[:index + 1])) - self.assertEqual(metric_data.sum, sum(self.test_values[:index + 1])) + self.assertLess( + metric_data.start_time_unix_nano, + metric_data.time_unix_nano, + ) + self.assertEqual( + metric_data.min, min(self.test_values[:index + 2]) + ) + self.assertEqual( + metric_data.max, max(self.test_values[:index + 2]) + ) + self.assertEqual( + metric_data.sum, sum(self.test_values[:index + 2]) + ) + + self.assertGreater( + metric_data.time_unix_nano, + previous_time_unix_nano + ) + + previous_time_unix_nano = metric_data.time_unix_nano self.assertEqual( metric_data.positive.bucket_counts, @@ -227,22 +250,31 @@ def test_synchronous_cumulative_temporality(self): results = [] for i in range(10): - results.append(reader.get_metrics_data()) provider.shutdown() - start_time_unix_nano = ( + metric_data = ( results[0] .resource_metrics[0] .scope_metrics[0] .metrics[0] .data.data_points[0] - .start_time_unix_nano ) - for metrics_data in results: + start_time_unix_nano = metric_data.start_time_unix_nano + self.assertLess( + metric_data.start_time_unix_nano, + metric_data.time_unix_nano, + ) + self.assertEqual(metric_data.min, min(self.test_values)) + self.assertEqual(metric_data.max, max(self.test_values)) + self.assertEqual(metric_data.sum, sum(self.test_values)) + + previous_metric_data = metric_data + + for index, metrics_data in enumerate(results[1:]): metric_data = ( 
metrics_data.resource_metrics[0] .scope_metrics[0] @@ -251,32 +283,47 @@ def test_synchronous_cumulative_temporality(self): ) self.assertEqual( - start_time_unix_nano, metric_data.start_time_unix_nano + previous_metric_data.start_time_unix_nano, + metric_data.start_time_unix_nano + ) + self.assertEqual( + previous_metric_data.min, + metric_data.min + ) + self.assertEqual( + previous_metric_data.max, + metric_data.max + ) + self.assertEqual( + previous_metric_data.sum, + metric_data.sum ) - self.assertEqual(metric_data.min, min(self.test_values[:index + 1])) - self.assertEqual(metric_data.max, max(self.test_values[:index + 1])) - self.assertEqual(metric_data.sum, sum(self.test_values[:index + 1])) - self.assertEqual( - metric_data.positive.bucket_counts, - [ - 1, - *[0] * 17, - 1, - *[0] * 36, - 1, - *[0] * 15, - 2, - *[0] * 15, - 1, - *[0] * 15, - 1, - *[0] * 15, - 1, - *[0] * 40, - ], - ) - self.assertEqual( - metric_data.negative.bucket_counts, - [0] - ) + self.assertEqual( + metric_data.positive.bucket_counts, + [ + 1, + *[0] * 17, + 1, + *[0] * 36, + 1, + *[0] * 15, + 2, + *[0] * 15, + 1, + *[0] * 15, + 1, + *[0] * 15, + 1, + *[0] * 40, + ], + ) + self.assertEqual( + metric_data.negative.bucket_counts, + [0] + ) + + self.assertLess( + previous_metric_data.time_unix_nano, + metric_data.time_unix_nano, + ) From 056f93f122b1322a1a6b7f9606a26dfc46fbe917 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 25 Jun 2024 16:53:57 -0600 Subject: [PATCH 27/35] Rename variables --- .../sdk/metrics/_internal/aggregation.py | 305 +++++++++--------- ...xponential_bucket_histogram_aggregation.py | 132 ++++---- .../tests/metrics/test_aggregation.py | 16 +- 3 files changed, 228 insertions(+), 225 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index 729fbedc7cd..a2f5ac04882 100644 --- 
a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -126,17 +126,17 @@ def __init__( ) self._instrument_is_monotonic = instrument_is_monotonic - self._current_value = None + self._value = None self._previous_collection_start_nano = self._start_time_unix_nano - self._previous_cumulative_value = 0 + self._previous_value = 0 def aggregate(self, measurement: Measurement) -> None: with self._lock: - if self._current_value is None: - self._current_value = 0 + if self._value is None: + self._value = 0 - self._current_value = self._current_value + measurement.value + self._value = self._value + measurement.value def collect( self, @@ -204,15 +204,15 @@ def collect( When the collection aggregation temporality does not match the instrument aggregation temporality, then a conversion is made. For this purpose, this aggregation keeps a private attribute, - self._previous_cumulative. + self._previous_value. When the instrument is synchronous: - self._previous_cumulative_value is the sum of every previously + self._previous_value is the sum of every previously collected (delta) value. In this case, the returned (cumulative) value will be: - self._previous_cumulative_value + current_value + self._previous_value + value synchronous_instrument.add(2) collect(CUMULATIVE) -> 2 @@ -225,10 +225,10 @@ def collect( time -> - self._previous_cumulative_value + self._previous_value |-------------| - current_value (delta) + value (delta) |----| returned value (cumulative) @@ -236,11 +236,11 @@ def collect( When the instrument is asynchronous: - self._previous_cumulative_value is the value of the previously + self._previous_value is the value of the previously collected (cumulative) value. 
In this case, the returned (delta) value will be: - current_value - self._previous_cumulative_value + value - self._previous_value callback() -> 1352 collect(DELTA) -> 1352 @@ -253,10 +253,10 @@ def collect( time -> - self._previous_cumulative_value + self._previous_value |-------------| - current_value (cumulative) + value (cumulative) |------------------| returned value (delta) @@ -264,8 +264,8 @@ def collect( """ with self._lock: - current_value = self._current_value - self._current_value = None + value = self._value + self._value = None if ( self._instrument_aggregation_temporality @@ -278,6 +278,9 @@ def collect( is AggregationTemporality.DELTA ): + if value is None: + return None + previous_collection_start_nano = ( self._previous_collection_start_nano ) @@ -292,27 +295,27 @@ def collect( attributes=self._attributes, start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, - value=current_value, + value=value, ) - if current_value is None: - current_value = 0 + if value is None: + value = 0 - self._previous_cumulative_value = ( - current_value + self._previous_cumulative_value + self._previous_value = ( + value + self._previous_value ) return NumberDataPoint( attributes=self._attributes, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, - value=self._previous_cumulative_value, + value=self._previous_value, ) # This happens when the corresponding instrument for this # aggregation is asynchronous. - if current_value is None: + if value is None: # This happens when the corresponding instrument callback # does not produce measurements. 
return None @@ -321,9 +324,9 @@ def collect( collection_aggregation_temporality is AggregationTemporality.DELTA ): - result_value = current_value - self._previous_cumulative_value + result_value = value - self._previous_value - self._previous_cumulative_value = current_value + self._previous_value = value previous_collection_start_nano = ( self._previous_collection_start_nano @@ -341,7 +344,7 @@ def collect( attributes=self._attributes, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, - value=current_value, + value=value, ) @@ -410,12 +413,12 @@ def __init__( self._boundaries = tuple(boundaries) self._record_min_max = record_min_max - self._current_value = None + self._value = None self._min = inf self._max = -inf self._sum = 0 - self._previous_cumulative_value = None + self._previous_value = None self._previous_min = inf self._previous_max = -inf self._previous_sum = 0 @@ -426,19 +429,20 @@ def _get_empty_bucket_counts(self) -> List[int]: return [0] * (len(self._boundaries) + 1) def aggregate(self, measurement: Measurement) -> None: + with self._lock: - if self._current_value is None: - self._current_value = self._get_empty_bucket_counts() + if self._value is None: + self._value = self._get_empty_bucket_counts() - value = measurement.value + measurement_value = measurement.value - self._sum += value + self._sum += measurement_value if self._record_min_max: - self._min = min(self._min, value) - self._max = max(self._max, value) + self._min = min(self._min, measurement_value) + self._max = max(self._max, measurement_value) - self._current_value[bisect_left(self._boundaries, value)] += 1 + self._value[bisect_left(self._boundaries, measurement_value)] += 1 def collect( self, @@ -450,12 +454,12 @@ def collect( """ with self._lock: - current_value = self._current_value + value = self._value sum_ = self._sum min_ = self._min max_ = self._max - self._current_value = None + self._value = None self._sum = 0 self._min = inf self._max = 
-inf @@ -471,6 +475,9 @@ def collect( is AggregationTemporality.DELTA ): + if value is None: + return None + previous_collection_start_nano = ( self._previous_collection_start_nano ) @@ -485,28 +492,28 @@ def collect( attributes=self._attributes, start_time_unix_nano=previous_collection_start_nano, time_unix_nano=collection_start_nano, - count=sum(current_value), + count=sum(value), sum=sum_, - bucket_counts=tuple(current_value), + bucket_counts=tuple(value), explicit_bounds=self._boundaries, min=min_, max=max_, ) - if current_value is None: - current_value = self._get_empty_bucket_counts() + if value is None: + value = self._get_empty_bucket_counts() - if self._previous_cumulative_value is None: - self._previous_cumulative_value = ( + if self._previous_value is None: + self._previous_value = ( self._get_empty_bucket_counts() ) - self._previous_cumulative_value = [ - current_value_element + previous_cumulative_value_element + self._previous_value = [ + value_element + previous_value_element for ( - current_value_element, - previous_cumulative_value_element, - ) in zip(current_value, self._previous_cumulative_value) + value_element, + previous_value_element, + ) in zip(value, self._previous_value) ] self._previous_min = min(min_, self._previous_min) self._previous_max = max(max_, self._previous_max) @@ -516,9 +523,9 @@ def collect( attributes=self._attributes, start_time_unix_nano=self._start_time_unix_nano, time_unix_nano=collection_start_nano, - count=sum(self._previous_cumulative_value), + count=sum(self._previous_value), sum=self._previous_sum, - bucket_counts=tuple(self._previous_cumulative_value), + bucket_counts=tuple(self._previous_value), explicit_bounds=self._boundaries, min=self._previous_min, max=self._previous_max, @@ -580,6 +587,12 @@ def __init__( max_scale, ) + # This aggregation is analogous to _ExplicitBucketHistogramAggregation, + # the only difference is that with every call to aggregate, the size + # and amount of buckets can change (in + # 
_ExplicitBucketHistogramAggregation both size and amount of buckets + # remain constant once it is instantiated). + super().__init__(attributes) self._instrument_aggregation_temporality = ( @@ -589,8 +602,8 @@ def __init__( self._max_size = max_size self._max_scale = max_scale - self._current_value_positive = None - self._current_value_negative = None + self._value_positive = None + self._value_negative = None self._min = inf self._max = -inf self._sum = 0 @@ -598,8 +611,8 @@ def __init__( self._zero_count = 0 self._scale = None - self._previous_cumulative_value_positive = None - self._previous_cumulative_value_negative = None + self._previous_value_positive = None + self._previous_value_negative = None self._previous_min = inf self._previous_max = -inf self._previous_sum = 0 @@ -615,25 +628,21 @@ def aggregate(self, measurement: Measurement) -> None: # pylint: disable=too-many-branches,too-many-statements, too-many-locals with self._lock: + if self._value_positive is None: + self._value_positive = Buckets() + if self._value_negative is None: + self._value_negative = Buckets() - value = measurement.value - - if self._current_value_positive is None: - self._current_value_positive = Buckets() - if self._current_value_negative is None: - self._current_value_negative = Buckets() - - if value < self._min: - self._min = value + measurement_value = measurement.value - if value > self._max: - self._max = value + self._sum += measurement_value - self._sum += value + self._min = min(self._min, measurement_value) + self._max = max(self._max, measurement_value) self._count += 1 - if value == 0: + if measurement_value == 0: self._zero_count += 1 if self._count == self._zero_count: @@ -641,37 +650,37 @@ def aggregate(self, measurement: Measurement) -> None: return - elif value > 0: - current_value = self._current_value_positive + elif measurement_value > 0: + value = self._value_positive else: - value = -value - current_value = self._current_value_negative + measurement_value = 
-measurement_value + value = self._value_negative - index = self._mapping.map_to_index(value) + index = self._mapping.map_to_index(measurement_value) is_rescaling_needed = False low, high = 0, 0 - if len(current_value) == 0: - current_value.index_start = index - current_value.index_end = index - current_value.index_base = index + if len(value) == 0: + value.index_start = index + value.index_end = index + value.index_base = index elif ( - index < current_value.index_start - and (current_value.index_end - index) >= self._max_size + index < value.index_start + and (value.index_end - index) >= self._max_size ): is_rescaling_needed = True low = index - high = current_value.index_end + high = value.index_end elif ( - index > current_value.index_end - and (index - current_value.index_start) >= self._max_size + index > value.index_end + and (index - value.index_start) >= self._max_size ): is_rescaling_needed = True - low = current_value.index_start + low = value.index_start high = index if is_rescaling_needed: @@ -682,39 +691,39 @@ def aggregate(self, measurement: Measurement) -> None: # added to the histogram, the buckets can change in size. 
self._downscale( scale_change, - self._current_value_positive, - self._current_value_negative, + self._value_positive, + self._value_negative, ) self._mapping = self._new_mapping( self._mapping.scale - scale_change ) - index = self._mapping.map_to_index(value) + index = self._mapping.map_to_index(measurement_value) self._scale = self._mapping.scale - if index < current_value.index_start: - span = current_value.index_end - index + if index < value.index_start: + span = value.index_end - index - if span >= len(current_value.counts): - current_value.grow(span + 1, self._max_size) + if span >= len(value.counts): + value.grow(span + 1, self._max_size) - current_value.index_start = index + value.index_start = index - elif index > current_value.index_end: - span = index - current_value.index_start + elif index > value.index_end: + span = index - value.index_start - if span >= len(current_value.counts): - current_value.grow(span + 1, self._max_size) + if span >= len(value.counts): + value.grow(span + 1, self._max_size) - current_value.index_end = index + value.index_end = index - bucket_index = index - current_value.index_base + bucket_index = index - value.index_base if bucket_index < 0: - bucket_index += len(current_value.counts) + bucket_index += len(value.counts) - current_value.increment_bucket(bucket_index) + value.increment_bucket(bucket_index) def collect( self, @@ -727,8 +736,8 @@ def collect( # pylint: disable=too-many-statements, too-many-locals with self._lock: - current_value_positive = self._current_value_positive - current_value_negative = self._current_value_negative + value_positive = self._value_positive + value_negative = self._value_negative sum_ = self._sum min_ = self._min max_ = self._max @@ -736,8 +745,8 @@ def collect( zero_count = self._zero_count scale = self._scale - self._current_value_positive = None - self._current_value_negative = None + self._value_positive = None + self._value_negative = None self._sum = 0 self._min = inf self._max = -inf 
@@ -757,8 +766,8 @@ def collect( ): if ( - current_value_positive is None and - current_value_negative is None + value_positive is None and + value_negative is None ): return None @@ -778,15 +787,15 @@ def collect( scale=scale, zero_count=zero_count, positive=BucketsPoint( - offset=current_value_positive.offset, + offset=value_positive.offset, bucket_counts=( - current_value_positive.get_offset_counts() + value_positive.get_offset_counts() ), ), negative=BucketsPoint( - offset=current_value_negative.offset, + offset=value_negative.offset, bucket_counts=( - current_value_negative.get_offset_counts() + value_negative.get_offset_counts() ), ), # FIXME: Find the right value for flags @@ -810,30 +819,30 @@ def collect( # TODO, implement this case if ( - current_value_positive is None and - self._previous_cumulative_value_positive is None + value_positive is None and + self._previous_value_positive is None ): # This happens if collect is called for the first time # and aggregate has not yet been called. - current_value_positive = Buckets() - self._previous_cumulative_value_positive = ( - current_value_positive.copy_empty() + value_positive = Buckets() + self._previous_value_positive = ( + value_positive.copy_empty() ) if ( - current_value_negative is None and - self._previous_cumulative_value_negative is None + value_negative is None and + self._previous_value_negative is None ): - current_value_negative = Buckets() - self._previous_cumulative_value_negative = ( - current_value_negative.copy_empty() + value_negative = Buckets() + self._previous_value_negative = ( + value_negative.copy_empty() ) if scale is None and self._previous_scale is None: scale = self._mapping.scale self._previous_scale = scale if ( - current_value_positive is not None and - self._previous_cumulative_value_positive is None + value_positive is not None and + self._previous_value_positive is None ): # This happens when collect is called the very first time # and aggregate has been called before. 
@@ -854,32 +863,32 @@ def collect( # ones resulting in the current ones unchanged we need to # generate empty buckets that have the same size and amount # as the current ones, this is what copy_empty does. - self._previous_cumulative_value_positive = ( - current_value_positive.copy_empty() + self._previous_value_positive = ( + value_positive.copy_empty() ) if ( - current_value_negative is not None and - self._previous_cumulative_value_negative is None + value_negative is not None and + self._previous_value_negative is None ): - self._previous_cumulative_value_negative = ( - current_value_negative.copy_empty() + self._previous_value_negative = ( + value_negative.copy_empty() ) if scale is not None and self._previous_scale is None: self._previous_scale = scale if ( - current_value_positive is None and - self._previous_cumulative_value_positive is not None + value_positive is None and + self._previous_value_positive is not None ): - current_value_positive = ( - self._previous_cumulative_value_positive.copy_empty() + value_positive = ( + self._previous_value_positive.copy_empty() ) if ( - current_value_negative is None and - self._previous_cumulative_value_negative is not None + value_negative is None and + self._previous_value_negative is not None ): - current_value_negative = ( - self._previous_cumulative_value_negative.copy_empty() + value_negative = ( + self._previous_value_negative.copy_empty() ) if scale is None and self._previous_scale is not None: scale = self._previous_scale @@ -888,16 +897,16 @@ def collect( low_positive, high_positive = ( self._get_low_high_previous_current( - self._previous_cumulative_value_positive, - current_value_positive, + self._previous_value_positive, + value_positive, scale, min_scale, ) ) low_negative, high_negative = ( self._get_low_high_previous_current( - self._previous_cumulative_value_negative, - current_value_negative, + self._previous_value_negative, + value_negative, scale, min_scale, ) @@ -915,19 +924,19 @@ def collect( # 
(the histogram scale) would be zero. See exponential.go 191 self._downscale( self._previous_scale - min_scale, - self._previous_cumulative_value_positive, - self._previous_cumulative_value_negative, + self._previous_value_positive, + self._previous_value_negative, ) self._merge( - self._previous_cumulative_value_positive, - current_value_positive, + self._previous_value_positive, + value_positive, scale, min_scale, collection_aggregation_temporality, ) self._merge( - self._previous_cumulative_value_negative, - current_value_negative, + self._previous_value_negative, + value_negative, scale, min_scale, collection_aggregation_temporality, @@ -951,18 +960,18 @@ def collect( scale=self._previous_scale, zero_count=self._previous_zero_count, positive=BucketsPoint( - offset=self._previous_cumulative_value_positive.offset, + offset=self._previous_value_positive.offset, bucket_counts=( self. - _previous_cumulative_value_positive. + _previous_value_positive. get_offset_counts() ), ), negative=BucketsPoint( - offset=self._previous_cumulative_value_negative.offset, + offset=self._previous_value_negative.offset, bucket_counts=( self. - _previous_cumulative_value_negative. + _previous_value_negative. 
get_offset_counts() ), ), diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index e950213d379..c5d04d3d797 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -74,8 +74,8 @@ def swap( ): for attribute in [ - "_current_value_positive", - "_current_value_negative", + "_value_positive", + "_value_negative", "_sum", "_count", "_zero_count", @@ -139,22 +139,22 @@ def require_equal(self, a, b): self.assertEqual(a._mapping.scale, b._mapping.scale) self.assertEqual( - len(a._current_value_positive), len(b._current_value_positive) + len(a._value_positive), len(b._value_positive) ) self.assertEqual( - len(a._current_value_negative), len(b._current_value_negative) + len(a._value_negative), len(b._value_negative) ) - for index in range(len(a._current_value_positive)): + for index in range(len(a._value_positive)): self.assertEqual( - a._current_value_positive[index], - b._current_value_positive[index] + a._value_positive[index], + b._value_positive[index] ) - for index in range(len(a._current_value_negative)): + for index in range(len(a._value_negative)): self.assertEqual( - a._current_value_negative[index], - b._current_value_negative[index] + a._value_negative[index], + b._value_negative[index] ) def test_alternating_growth_0(self): @@ -182,13 +182,13 @@ def test_alternating_growth_0(self): exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.offset, + exponential_histogram_aggregation._value_positive.offset, -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( get_counts( - 
exponential_histogram_aggregation._current_value_positive + exponential_histogram_aggregation._value_positive ), [1, 1, 1] ) @@ -215,13 +215,13 @@ def test_alternating_growth_1(self): exponential_histogram_aggregation.aggregate(Measurement(0.5, Mock())) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.offset, + exponential_histogram_aggregation._value_positive.offset, -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, -1) self.assertEqual( get_counts( - exponential_histogram_aggregation._current_value_positive + exponential_histogram_aggregation._value_positive ), [2, 3, 1] ) @@ -288,25 +288,25 @@ def test_permutations(self): ) self.assertEqual( exponential_histogram_aggregation. - _current_value_positive. + _value_positive. offset, expected["offset"], ) self.assertEqual( len( exponential_histogram_aggregation. - _current_value_positive + _value_positive ), expected["len"], ) self.assertEqual( exponential_histogram_aggregation. - _current_value_positive[0], + _value_positive[0], expected["at_0"], ) self.assertEqual( exponential_histogram_aggregation. - _current_value_positive[1], + _value_positive[1], expected["at_1"], ) @@ -355,7 +355,7 @@ def ascending_sequence_test( self.assertEqual( offset, exponential_histogram_aggregation. - _current_value_positive. + _value_positive. offset ) @@ -365,7 +365,7 @@ def ascending_sequence_test( sum_ += max_val self.assertNotEqual( - 0, exponential_histogram_aggregation._current_value_positive[0] + 0, exponential_histogram_aggregation._value_positive[0] ) # The maximum-index filled bucket is at or @@ -376,15 +376,15 @@ def ascending_sequence_test( total_count = 0 for index in range( - len(exponential_histogram_aggregation._current_value_positive) + len(exponential_histogram_aggregation._value_positive) ): total_count += ( exponential_histogram_aggregation. - _current_value_positive[index] + _value_positive[index] ) if ( exponential_histogram_aggregation. 
- _current_value_positive[index] != 0 + _value_positive[index] != 0 ): max_fill = index @@ -414,7 +414,7 @@ def ascending_sequence_test( self.assertEqual( index, exponential_histogram_aggregation. - _current_value_positive. + _value_positive. offset ) @@ -423,9 +423,9 @@ def ascending_sequence_test( self.assertEqual( index, exponential_histogram_aggregation. - _current_value_positive.offset + _value_positive.offset + len( - exponential_histogram_aggregation._current_value_positive + exponential_histogram_aggregation._value_positive ) - 1, ) @@ -457,7 +457,7 @@ def mock_increment(self, bucket_index: int) -> None: self.assertEqual(0, exponential_histogram_aggregation._sum) expect = 0 - exponential_histogram_aggregation._current_value_positive = ( + exponential_histogram_aggregation._value_positive = ( Buckets() ) @@ -465,13 +465,12 @@ def mock_increment(self, bucket_index: int) -> None: expect += value * increment with patch.object( exponential_histogram_aggregation. - _current_value_positive, + _value_positive, "increment_bucket", - # new=positive_mock MethodType( mock_increment, exponential_histogram_aggregation. - _current_value_positive, + _value_positive, ), ): exponential_histogram_aggregation.aggregate( @@ -494,20 +493,20 @@ def mock_increment(self, bucket_index: int) -> None: 256 - ((1 << scale) - 1), len( exponential_histogram_aggregation. - _current_value_positive + _value_positive ), ) self.assertEqual( (1 << scale) - 1, exponential_histogram_aggregation. - _current_value_positive. + _value_positive. offset, ) for index in range(0, 256): self.assertLessEqual( exponential_histogram_aggregation. 
- _current_value_positive[index], + _value_positive[index], 6 * increment, ) @@ -553,17 +552,17 @@ def test_move_into(self): self.assertEqual( 256 - ((1 << scale) - 1), - len(exponential_histogram_aggregation_1._current_value_positive), + len(exponential_histogram_aggregation_1._value_positive), ) self.assertEqual( (1 << scale) - 1, - exponential_histogram_aggregation_1._current_value_positive.offset, + exponential_histogram_aggregation_1._value_positive.offset, ) for index in range(0, 256): self.assertLessEqual( exponential_histogram_aggregation_1. - _current_value_positive[index], + _value_positive[index], 6 ) @@ -578,21 +577,21 @@ def test_very_large_numbers(self): def expect_balanced(count: int): self.assertEqual( 2, - len(exponential_histogram_aggregation._current_value_positive) + len(exponential_histogram_aggregation._value_positive) ) self.assertEqual( -1, exponential_histogram_aggregation. - _current_value_positive. + _value_positive. offset ) self.assertEqual( count, - exponential_histogram_aggregation._current_value_positive[0] + exponential_histogram_aggregation._value_positive[0] ) self.assertEqual( count, - exponential_histogram_aggregation._current_value_positive[1] + exponential_histogram_aggregation._value_positive[1] ) exponential_histogram_aggregation.aggregate( @@ -678,17 +677,17 @@ def test_full_range(self): self.assertEqual( _ExponentialBucketHistogramAggregation._min_max_size, - len(exponential_histogram_aggregation._current_value_positive), + len(exponential_histogram_aggregation._value_positive), ) self.assertEqual( -1, - exponential_histogram_aggregation._current_value_positive.offset + exponential_histogram_aggregation._value_positive.offset ) self.assertLessEqual( - exponential_histogram_aggregation._current_value_positive[0], 2 + exponential_histogram_aggregation._value_positive[0], 2 ) self.assertLessEqual( - exponential_histogram_aggregation._current_value_positive[1], 1 + exponential_histogram_aggregation._value_positive[1], 1 ) def 
test_aggregator_min_max(self): @@ -753,8 +752,8 @@ def test_aggregator_copy_swap(self): ) # pylint: disable=unnecessary-dunder-call - exponential_histogram_aggregation_2._current_value_positive.__init__() - exponential_histogram_aggregation_2._current_value_negative.__init__() + exponential_histogram_aggregation_2._value_positive.__init__() + exponential_histogram_aggregation_2._value_negative.__init__() exponential_histogram_aggregation_2._sum = 0 exponential_histogram_aggregation_2._count = 0 exponential_histogram_aggregation_2._zero_count = 0 @@ -765,8 +764,8 @@ def test_aggregator_copy_swap(self): ) for attribute in [ - "_current_value_positive", - "_current_value_negative", + "_value_positive", + "_value_negative", "_sum", "_count", "_zero_count", @@ -805,9 +804,6 @@ def test_zero_count_by_increment(self): ) ) - # positive_mock = Mock( - # wraps=exponential_histogram_aggregation_1._current_value_positive - # ) def mock_increment(self, bucket_index: int) -> None: """ Increments a bucket @@ -815,15 +811,14 @@ def mock_increment(self, bucket_index: int) -> None: self._counts[bucket_index] += increment - exponential_histogram_aggregation_1._current_value_positive = Buckets() + exponential_histogram_aggregation_1._value_positive = Buckets() with patch.object( - exponential_histogram_aggregation_1._current_value_positive, + exponential_histogram_aggregation_1._value_positive, "increment_bucket", - # new=positive_mock MethodType( mock_increment, - exponential_histogram_aggregation_1._current_value_positive + exponential_histogram_aggregation_1._value_positive ), ): exponential_histogram_aggregation_1.aggregate( @@ -864,15 +859,14 @@ def mock_increment(self, bucket_index: int) -> None: self._counts[bucket_index] += increment - exponential_histogram_aggregation_1._current_value_positive = Buckets() + exponential_histogram_aggregation_1._value_positive = Buckets() with patch.object( - exponential_histogram_aggregation_1._current_value_positive, + 
exponential_histogram_aggregation_1._value_positive, "increment_bucket", - # new=positive_mock MethodType( mock_increment, - exponential_histogram_aggregation_1._current_value_positive + exponential_histogram_aggregation_1._value_positive ), ): exponential_histogram_aggregation_1.aggregate( @@ -942,7 +936,7 @@ def test_min_max_size(self): self.assertEqual( len( exponential_histogram_aggregation. - _current_value_positive. + _value_positive. _counts ), exponential_histogram_aggregation._min_max_size, @@ -1103,7 +1097,7 @@ def collect_and_validate(values, histogram) -> None: # run this test case with the same values used in a previous execution, # check the value printed by that previous execution of this test case # and use the same value for the seed variable in the line below. - seed = 4539544373807492135 + # seed = 4539544373807492135 random_generator = Random(seed) print(f"seed for {currentframe().f_code.co_name} is {seed}") @@ -1132,11 +1126,11 @@ def test_merge_collect_cumulative(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.offset, + exponential_histogram_aggregation._value_positive.offset, 0 ) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.counts, + exponential_histogram_aggregation._value_positive.counts, [1, 1, 1, 1] ) @@ -1154,11 +1148,11 @@ def test_merge_collect_cumulative(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.offset, + exponential_histogram_aggregation._value_positive.offset, -4 ) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.counts, + exponential_histogram_aggregation._value_positive.counts, [1, 1, 1, 1] ) @@ -1183,11 +1177,11 @@ def test_merge_collect_delta(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - 
exponential_histogram_aggregation._current_value_positive.offset, + exponential_histogram_aggregation._value_positive.offset, 0 ) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.counts, + exponential_histogram_aggregation._value_positive.counts, [1, 1, 1, 1] ) @@ -1203,11 +1197,11 @@ def test_merge_collect_delta(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.offset, + exponential_histogram_aggregation._value_positive.offset, -4 ) self.assertEqual( - exponential_histogram_aggregation._current_value_positive.counts, + exponential_histogram_aggregation._value_positive.counts, [1, 1, 1, 1] ) diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index 9d61da72a04..63fdcb4c1c4 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -68,7 +68,7 @@ def test_aggregate_delta(self): synchronous_sum_aggregation.aggregate(measurement(2)) synchronous_sum_aggregation.aggregate(measurement(3)) - self.assertEqual(synchronous_sum_aggregation._current_value, 6) + self.assertEqual(synchronous_sum_aggregation._value, 6) synchronous_sum_aggregation = _SumAggregation( Mock(), True, AggregationTemporality.DELTA, 0 @@ -78,7 +78,7 @@ def test_aggregate_delta(self): synchronous_sum_aggregation.aggregate(measurement(-2)) synchronous_sum_aggregation.aggregate(measurement(3)) - self.assertEqual(synchronous_sum_aggregation._current_value, 2) + self.assertEqual(synchronous_sum_aggregation._value, 2) def test_aggregate_cumulative(self): """ @@ -93,7 +93,7 @@ def test_aggregate_cumulative(self): synchronous_sum_aggregation.aggregate(measurement(2)) synchronous_sum_aggregation.aggregate(measurement(3)) - self.assertEqual(synchronous_sum_aggregation._current_value, 6) + self.assertEqual(synchronous_sum_aggregation._value, 6) 
synchronous_sum_aggregation = _SumAggregation( Mock(), True, AggregationTemporality.CUMULATIVE, 0 @@ -103,7 +103,7 @@ def test_aggregate_cumulative(self): synchronous_sum_aggregation.aggregate(measurement(-2)) synchronous_sum_aggregation.aggregate(measurement(3)) - self.assertEqual(synchronous_sum_aggregation._current_value, 2) + self.assertEqual(synchronous_sum_aggregation._value, 2) def test_collect_delta(self): """ @@ -293,22 +293,22 @@ def test_aggregate(self): # The first bucket keeps count of values between (-inf, 0] (-1 and 0) self.assertEqual( - explicit_bucket_histogram_aggregation._current_value[0], 2 + explicit_bucket_histogram_aggregation._value[0], 2 ) # The second bucket keeps count of values between (0, 2] (1 and 2) self.assertEqual( - explicit_bucket_histogram_aggregation._current_value[1], 2 + explicit_bucket_histogram_aggregation._value[1], 2 ) # The third bucket keeps count of values between (2, 4] (3 and 4) self.assertEqual( - explicit_bucket_histogram_aggregation._current_value[2], 2 + explicit_bucket_histogram_aggregation._value[2], 2 ) # The fourth bucket keeps count of values between (4, inf) (3 and 4) self.assertEqual( - explicit_bucket_histogram_aggregation._current_value[3], 1 + explicit_bucket_histogram_aggregation._value[3], 1 ) histo = explicit_bucket_histogram_aggregation.collect( From 526400ab9397bfdb2f29616e391a07eb5308b38d Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 25 Jun 2024 17:43:50 -0600 Subject: [PATCH 28/35] Explain analogy with ExplicitBucket --- .../sdk/metrics/_internal/aggregation.py | 31 ++++++++++++++----- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index a2f5ac04882..d9b1301b0b2 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ 
-657,6 +657,11 @@ def aggregate(self, measurement: Measurement) -> None: measurement_value = -measurement_value value = self._value_negative + # The following code finds out if it is necessary to change the + # buckets to hold the incoming measurement_value, changes them if + # necessary. This process does not exist in + # _ExplicitBucketHistogram aggregation because the buckets there + # are constant in size and amount. index = self._mapping.map_to_index(measurement_value) is_rescaling_needed = False @@ -686,9 +691,6 @@ def aggregate(self, measurement: Measurement) -> None: if is_rescaling_needed: scale_change = self._get_scale_change(low, high) - # _downscale changes the buckets. This is the main difference - # with the _ExplicitBucketHistogramAggregation, as values are - # added to the histogram, the buckets can change in size. self._downscale( scale_change, self._value_positive, @@ -723,6 +725,13 @@ def aggregate(self, measurement: Measurement) -> None: if bucket_index < 0: bucket_index += len(value.counts) + # Now the buckets have been changed if needed and bucket_index will + # be used to increment the counter of the bucket that needs to be + # incremented. + + # This is analogous to + # self._value[bisect_left(self._boundaries, measurement_value)] += 1 + # in _ExplicitBucketHistogramAggregation.aggregate value.increment_bucket(bucket_index) def collect( @@ -816,8 +825,6 @@ def collect( # need to be made so that they can be cumulatively aggregated # to the current buckets). - # TODO, implement this case - if ( value_positive is None and self._previous_value_positive is None @@ -919,14 +926,22 @@ def collect( - self._get_scale_change(low_negative, high_negative), ) - # FIXME Go implementation checks if the histogram (not the mapping - # but the histogram) has a count larger than zero, if not, scale - # (the histogram scale) would be zero. 
See exponential.go 191 self._downscale( self._previous_scale - min_scale, self._previous_value_positive, self._previous_value_negative, ) + + # self._merge adds the values from value to + # self._previous_value, this is analogous to + # self._previous_value = [ + # value_element + previous_value_element + # for ( + # value_element, + # previous_value_element, + # ) in zip(value, self._previous_value) + # ] + # in _ExplicitBucketHistogramAggregation.collect. self._merge( self._previous_value_positive, value_positive, From cac0ef5c45cc04e5968e079e7f1d0fce0945fec8 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 25 Jun 2024 17:46:51 -0600 Subject: [PATCH 29/35] Add changelog and fix lint --- CHANGELOG.md | 2 + .../sdk/metrics/_internal/aggregation.py | 79 +++------ .../exponential_histogram/buckets.py | 4 + ...xponential_bucket_histogram_aggregation.py | 152 ++++++------------ .../test_exponential_bucket_histogram.py | 58 ++----- .../tests/metrics/test_aggregation.py | 16 +- 6 files changed, 98 insertions(+), 213 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 44938228ca3..439a9eb9640 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -21,6 +21,8 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ([#3956](https://github.com/open-telemetry/opentelemetry-python/pull/3956)) - When encountering an error encoding metric attributes in the OTLP exporter, log the key that had an error. 
([#3838](https://github.com/open-telemetry/opentelemetry-python/pull/3838)) +- Fix `ExponentialHistogramAggregation` + ([#3978](https://github.com/open-telemetry/opentelemetry-python/pull/3978)) - Log a warning when a `LogRecord` in `sdk/log` has dropped attributes due to reaching limits ([#3946](https://github.com/open-telemetry/opentelemetry-python/pull/3946)) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index d9b1301b0b2..c93b93cd9c6 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -301,9 +301,7 @@ def collect( if value is None: value = 0 - self._previous_value = ( - value + self._previous_value - ) + self._previous_value = value + self._previous_value return NumberDataPoint( attributes=self._attributes, @@ -504,9 +502,7 @@ def collect( value = self._get_empty_bucket_counts() if self._previous_value is None: - self._previous_value = ( - self._get_empty_bucket_counts() - ) + self._previous_value = self._get_empty_bucket_counts() self._previous_value = [ value_element + previous_value_element @@ -650,7 +646,7 @@ def aggregate(self, measurement: Measurement) -> None: return - elif measurement_value > 0: + if measurement_value > 0: value = self._value_positive else: @@ -774,10 +770,7 @@ def collect( is AggregationTemporality.DELTA ): - if ( - value_positive is None and - value_negative is None - ): + if value_positive is None and value_negative is None: return None previous_collection_start_nano = ( @@ -797,15 +790,11 @@ def collect( zero_count=zero_count, positive=BucketsPoint( offset=value_positive.offset, - bucket_counts=( - value_positive.get_offset_counts() - ), + bucket_counts=(value_positive.get_offset_counts()), ), negative=BucketsPoint( offset=value_negative.offset, - bucket_counts=( - value_negative.get_offset_counts() - ), + 
bucket_counts=(value_negative.get_offset_counts()), ), # FIXME: Find the right value for flags flags=0, @@ -826,30 +815,26 @@ def collect( # to the current buckets). if ( - value_positive is None and - self._previous_value_positive is None + value_positive is None + and self._previous_value_positive is None ): # This happens if collect is called for the first time # and aggregate has not yet been called. value_positive = Buckets() - self._previous_value_positive = ( - value_positive.copy_empty() - ) + self._previous_value_positive = value_positive.copy_empty() if ( - value_negative is None and - self._previous_value_negative is None + value_negative is None + and self._previous_value_negative is None ): value_negative = Buckets() - self._previous_value_negative = ( - value_negative.copy_empty() - ) + self._previous_value_negative = value_negative.copy_empty() if scale is None and self._previous_scale is None: scale = self._mapping.scale self._previous_scale = scale if ( - value_positive is not None and - self._previous_value_positive is None + value_positive is not None + and self._previous_value_positive is None ): # This happens when collect is called the very first time # and aggregate has been called before. @@ -870,33 +855,25 @@ def collect( # ones resulting in the current ones unchanged we need to # generate empty buckets that have the same size and amount # as the current ones, this is what copy_empty does. 
- self._previous_value_positive = ( - value_positive.copy_empty() - ) + self._previous_value_positive = value_positive.copy_empty() if ( - value_negative is not None and - self._previous_value_negative is None + value_negative is not None + and self._previous_value_negative is None ): - self._previous_value_negative = ( - value_negative.copy_empty() - ) + self._previous_value_negative = value_negative.copy_empty() if scale is not None and self._previous_scale is None: self._previous_scale = scale if ( - value_positive is None and - self._previous_value_positive is not None + value_positive is None + and self._previous_value_positive is not None ): - value_positive = ( - self._previous_value_positive.copy_empty() - ) + value_positive = self._previous_value_positive.copy_empty() if ( - value_negative is None and - self._previous_value_negative is not None + value_negative is None + and self._previous_value_negative is not None ): - value_negative = ( - self._previous_value_negative.copy_empty() - ) + value_negative = self._previous_value_negative.copy_empty() if scale is None and self._previous_scale is not None: scale = self._previous_scale @@ -977,17 +954,13 @@ def collect( positive=BucketsPoint( offset=self._previous_value_positive.offset, bucket_counts=( - self. - _previous_value_positive. - get_offset_counts() + self._previous_value_positive.get_offset_counts() ), ), negative=BucketsPoint( offset=self._previous_value_negative.offset, bucket_counts=( - self. - _previous_value_negative. 
- get_offset_counts() + self._previous_value_negative.get_offset_counts() ), ), # FIXME: Find the right value for flags diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py index ee0060a986f..8877985c234 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py @@ -181,6 +181,10 @@ def increment_bucket(self, bucket_index: int, increment: int = 1) -> None: def copy_empty(self) -> "Buckets": copy = Buckets() + # pylint: disable=no-member + # pylint: disable=protected-access + # pylint: disable=attribute-defined-outside-init + # pylint: disable=invalid-name copy._Buckets__index_base = self._Buckets__index_base copy._Buckets__index_start = self._Buckets__index_start copy._Buckets__index_end = self._Buckets__index_end diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index c5d04d3d797..df31af4ece9 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -138,23 +138,17 @@ def require_equal(self, a, b): self.assertEqual(a._mapping.scale, b._mapping.scale) - self.assertEqual( - len(a._value_positive), len(b._value_positive) - ) - self.assertEqual( - len(a._value_negative), len(b._value_negative) - ) + self.assertEqual(len(a._value_positive), len(b._value_positive)) + self.assertEqual(len(a._value_negative), len(b._value_negative)) for index in range(len(a._value_positive)): self.assertEqual( - a._value_positive[index], - b._value_positive[index] + 
a._value_positive[index], b._value_positive[index] ) for index in range(len(a._value_negative)): self.assertEqual( - a._value_negative[index], - b._value_negative[index] + a._value_negative[index], b._value_negative[index] ) def test_alternating_growth_0(self): @@ -182,15 +176,12 @@ def test_alternating_growth_0(self): exponential_histogram_aggregation.aggregate(Measurement(1, Mock())) self.assertEqual( - exponential_histogram_aggregation._value_positive.offset, - -1 + exponential_histogram_aggregation._value_positive.offset, -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - get_counts( - exponential_histogram_aggregation._value_positive - ), - [1, 1, 1] + get_counts(exponential_histogram_aggregation._value_positive), + [1, 1, 1], ) def test_alternating_growth_1(self): @@ -215,15 +206,12 @@ def test_alternating_growth_1(self): exponential_histogram_aggregation.aggregate(Measurement(0.5, Mock())) self.assertEqual( - exponential_histogram_aggregation._value_positive.offset, - -1 + exponential_histogram_aggregation._value_positive.offset, -1 ) self.assertEqual(exponential_histogram_aggregation._mapping.scale, -1) self.assertEqual( - get_counts( - exponential_histogram_aggregation._value_positive - ), - [2, 3, 1] + get_counts(exponential_histogram_aggregation._value_positive), + [2, 3, 1], ) def test_permutations(self): @@ -272,7 +260,7 @@ def test_permutations(self): Mock(), AggregationTemporality.DELTA, Mock(), - max_size=2 + max_size=2, ) ) @@ -287,26 +275,19 @@ def test_permutations(self): expected["scale"], ) self.assertEqual( - exponential_histogram_aggregation. - _value_positive. - offset, + exponential_histogram_aggregation._value_positive.offset, expected["offset"], ) self.assertEqual( - len( - exponential_histogram_aggregation. - _value_positive - ), + len(exponential_histogram_aggregation._value_positive), expected["len"], ) self.assertEqual( - exponential_histogram_aggregation. 
- _value_positive[0], + exponential_histogram_aggregation._value_positive[0], expected["at_0"], ) self.assertEqual( - exponential_histogram_aggregation. - _value_positive[1], + exponential_histogram_aggregation._value_positive[1], expected["at_1"], ) @@ -328,7 +309,7 @@ def ascending_sequence_test( Mock(), AggregationTemporality.DELTA, Mock(), - max_size=max_size + max_size=max_size, ) ) @@ -354,9 +335,7 @@ def ascending_sequence_test( ) self.assertEqual( offset, - exponential_histogram_aggregation. - _value_positive. - offset + exponential_histogram_aggregation._value_positive.offset, ) exponential_histogram_aggregation.aggregate( @@ -379,12 +358,11 @@ def ascending_sequence_test( len(exponential_histogram_aggregation._value_positive) ): total_count += ( - exponential_histogram_aggregation. - _value_positive[index] + exponential_histogram_aggregation._value_positive[index] ) if ( - exponential_histogram_aggregation. - _value_positive[index] != 0 + exponential_histogram_aggregation._value_positive[index] + != 0 ): max_fill = index @@ -412,21 +390,15 @@ def ascending_sequence_test( index = mapping.map_to_index(min_val) self.assertEqual( - index, - exponential_histogram_aggregation. - _value_positive. - offset + index, exponential_histogram_aggregation._value_positive.offset ) index = mapping.map_to_index(max_val) self.assertEqual( index, - exponential_histogram_aggregation. 
- _value_positive.offset - + len( - exponential_histogram_aggregation._value_positive - ) + exponential_histogram_aggregation._value_positive.offset + + len(exponential_histogram_aggregation._value_positive) - 1, ) @@ -443,10 +415,7 @@ def mock_increment(self, bucket_index: int) -> None: exponential_histogram_aggregation = ( _ExponentialBucketHistogramAggregation( - Mock(), - AggregationTemporality.DELTA, - Mock(), - max_size=256 + Mock(), AggregationTemporality.DELTA, Mock(), max_size=256 ) ) @@ -457,20 +426,16 @@ def mock_increment(self, bucket_index: int) -> None: self.assertEqual(0, exponential_histogram_aggregation._sum) expect = 0 - exponential_histogram_aggregation._value_positive = ( - Buckets() - ) + exponential_histogram_aggregation._value_positive = Buckets() for value in range(2, 257): expect += value * increment with patch.object( - exponential_histogram_aggregation. - _value_positive, + exponential_histogram_aggregation._value_positive, "increment_bucket", MethodType( mock_increment, - exponential_histogram_aggregation. - _value_positive, + exponential_histogram_aggregation._value_positive, ), ): exponential_histogram_aggregation.aggregate( @@ -491,22 +456,16 @@ def mock_increment(self, bucket_index: int) -> None: self.assertEqual( 256 - ((1 << scale) - 1), - len( - exponential_histogram_aggregation. - _value_positive - ), + len(exponential_histogram_aggregation._value_positive), ) self.assertEqual( (1 << scale) - 1, - exponential_histogram_aggregation. - _value_positive. - offset, + exponential_histogram_aggregation._value_positive.offset, ) for index in range(0, 256): self.assertLessEqual( - exponential_histogram_aggregation. - _value_positive[index], + exponential_histogram_aggregation._value_positive[index], 6 * increment, ) @@ -561,9 +520,7 @@ def test_move_into(self): for index in range(0, 256): self.assertLessEqual( - exponential_histogram_aggregation_1. 
- _value_positive[index], - 6 + exponential_histogram_aggregation_1._value_positive[index], 6 ) def test_very_large_numbers(self): @@ -576,22 +533,16 @@ def test_very_large_numbers(self): def expect_balanced(count: int): self.assertEqual( - 2, - len(exponential_histogram_aggregation._value_positive) + 2, len(exponential_histogram_aggregation._value_positive) ) self.assertEqual( - -1, - exponential_histogram_aggregation. - _value_positive. - offset + -1, exponential_histogram_aggregation._value_positive.offset ) self.assertEqual( - count, - exponential_histogram_aggregation._value_positive[0] + count, exponential_histogram_aggregation._value_positive[0] ) self.assertEqual( - count, - exponential_histogram_aggregation._value_positive[1] + count, exponential_histogram_aggregation._value_positive[1] ) exponential_histogram_aggregation.aggregate( @@ -680,8 +631,7 @@ def test_full_range(self): len(exponential_histogram_aggregation._value_positive), ) self.assertEqual( - -1, - exponential_histogram_aggregation._value_positive.offset + -1, exponential_histogram_aggregation._value_positive.offset ) self.assertLessEqual( exponential_histogram_aggregation._value_positive[0], 2 @@ -818,7 +768,7 @@ def mock_increment(self, bucket_index: int) -> None: "increment_bucket", MethodType( mock_increment, - exponential_histogram_aggregation_1._value_positive + exponential_histogram_aggregation_1._value_positive, ), ): exponential_histogram_aggregation_1.aggregate( @@ -866,7 +816,7 @@ def mock_increment(self, bucket_index: int) -> None: "increment_bucket", MethodType( mock_increment, - exponential_histogram_aggregation_1._value_positive + exponential_histogram_aggregation_1._value_positive, ), ): exponential_histogram_aggregation_1.aggregate( @@ -934,11 +884,7 @@ def test_min_max_size(self): # This means the smallest max_scale is enough for the full range of the # normal floating point values. self.assertEqual( - len( - exponential_histogram_aggregation. - _value_positive. 
- _counts - ), + len(exponential_histogram_aggregation._value_positive._counts), exponential_histogram_aggregation._min_max_size, ) @@ -1126,12 +1072,11 @@ def test_merge_collect_cumulative(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._value_positive.offset, - 0 + exponential_histogram_aggregation._value_positive.offset, 0 ) self.assertEqual( exponential_histogram_aggregation._value_positive.counts, - [1, 1, 1, 1] + [1, 1, 1, 1], ) result_0 = exponential_histogram_aggregation.collect( @@ -1148,12 +1093,11 @@ def test_merge_collect_cumulative(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._value_positive.offset, - -4 + exponential_histogram_aggregation._value_positive.offset, -4 ) self.assertEqual( exponential_histogram_aggregation._value_positive.counts, - [1, 1, 1, 1] + [1, 1, 1, 1], ) result_1 = exponential_histogram_aggregation.collect( @@ -1177,12 +1121,11 @@ def test_merge_collect_delta(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._value_positive.offset, - 0 + exponential_histogram_aggregation._value_positive.offset, 0 ) self.assertEqual( exponential_histogram_aggregation._value_positive.counts, - [1, 1, 1, 1] + [1, 1, 1, 1], ) result = exponential_histogram_aggregation.collect( @@ -1197,12 +1140,11 @@ def test_merge_collect_delta(self): self.assertEqual(exponential_histogram_aggregation._mapping.scale, 0) self.assertEqual( - exponential_histogram_aggregation._value_positive.offset, - -4 + exponential_histogram_aggregation._value_positive.offset, -4 ) self.assertEqual( exponential_histogram_aggregation._value_positive.counts, - [1, 1, 1, 1] + [1, 1, 1, 1], ) result_1 = exponential_histogram_aggregation.collect( diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py 
b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py index d6d6799fd94..545ee23b06e 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py @@ -23,7 +23,7 @@ InMemoryMetricReader, ) from opentelemetry.sdk.metrics.view import ( - ExponentialBucketHistogramAggregation + ExponentialBucketHistogramAggregation, ) @@ -78,14 +78,8 @@ def test_synchronous_delta_temporality(self): previous_time_unix_nano = metric_data.time_unix_nano - self.assertEqual( - metric_data.positive.bucket_counts, - [1] - ) - self.assertEqual( - metric_data.negative.bucket_counts, - [0] - ) + self.assertEqual(metric_data.positive.bucket_counts, [1]) + self.assertEqual(metric_data.negative.bucket_counts, [0]) self.assertLess( metric_data.start_time_unix_nano, @@ -107,14 +101,8 @@ def test_synchronous_delta_temporality(self): previous_time_unix_nano, metric_data.start_time_unix_nano ) previous_time_unix_nano = metric_data.time_unix_nano - self.assertEqual( - metric_data.positive.bucket_counts, - [1] - ) - self.assertEqual( - metric_data.negative.bucket_counts, - [0] - ) + self.assertEqual(metric_data.positive.bucket_counts, [1]) + self.assertEqual(metric_data.negative.bucket_counts, [0]) self.assertLess( metric_data.start_time_unix_nano, metric_data.time_unix_nano ) @@ -207,18 +195,17 @@ def test_synchronous_cumulative_temporality(self): metric_data.time_unix_nano, ) self.assertEqual( - metric_data.min, min(self.test_values[:index + 2]) + metric_data.min, min(self.test_values[: index + 2]) ) self.assertEqual( - metric_data.max, max(self.test_values[:index + 2]) + metric_data.max, max(self.test_values[: index + 2]) ) self.assertEqual( - metric_data.sum, sum(self.test_values[:index + 2]) + metric_data.sum, sum(self.test_values[: index + 2]) ) self.assertGreater( - metric_data.time_unix_nano, - previous_time_unix_nano + 
metric_data.time_unix_nano, previous_time_unix_nano ) previous_time_unix_nano = metric_data.time_unix_nano @@ -242,10 +229,7 @@ def test_synchronous_cumulative_temporality(self): *[0] * 40, ], ) - self.assertEqual( - metric_data.negative.bucket_counts, - [0] - ) + self.assertEqual(metric_data.negative.bucket_counts, [0]) results = [] @@ -284,20 +268,11 @@ def test_synchronous_cumulative_temporality(self): self.assertEqual( previous_metric_data.start_time_unix_nano, - metric_data.start_time_unix_nano - ) - self.assertEqual( - previous_metric_data.min, - metric_data.min - ) - self.assertEqual( - previous_metric_data.max, - metric_data.max - ) - self.assertEqual( - previous_metric_data.sum, - metric_data.sum + metric_data.start_time_unix_nano, ) + self.assertEqual(previous_metric_data.min, metric_data.min) + self.assertEqual(previous_metric_data.max, metric_data.max) + self.assertEqual(previous_metric_data.sum, metric_data.sum) self.assertEqual( metric_data.positive.bucket_counts, @@ -318,10 +293,7 @@ def test_synchronous_cumulative_temporality(self): *[0] * 40, ], ) - self.assertEqual( - metric_data.negative.bucket_counts, - [0] - ) + self.assertEqual(metric_data.negative.bucket_counts, [0]) self.assertLess( previous_metric_data.time_unix_nano, diff --git a/opentelemetry-sdk/tests/metrics/test_aggregation.py b/opentelemetry-sdk/tests/metrics/test_aggregation.py index 63fdcb4c1c4..7ea463ec8a8 100644 --- a/opentelemetry-sdk/tests/metrics/test_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/test_aggregation.py @@ -292,24 +292,16 @@ def test_aggregate(self): explicit_bucket_histogram_aggregation.aggregate(measurement(5)) # The first bucket keeps count of values between (-inf, 0] (-1 and 0) - self.assertEqual( - explicit_bucket_histogram_aggregation._value[0], 2 - ) + self.assertEqual(explicit_bucket_histogram_aggregation._value[0], 2) # The second bucket keeps count of values between (0, 2] (1 and 2) - self.assertEqual( - 
explicit_bucket_histogram_aggregation._value[1], 2 - ) + self.assertEqual(explicit_bucket_histogram_aggregation._value[1], 2) # The third bucket keeps count of values between (2, 4] (3 and 4) - self.assertEqual( - explicit_bucket_histogram_aggregation._value[2], 2 - ) + self.assertEqual(explicit_bucket_histogram_aggregation._value[2], 2) # The fourth bucket keeps count of values between (4, inf) (3 and 4) - self.assertEqual( - explicit_bucket_histogram_aggregation._value[3], 1 - ) + self.assertEqual(explicit_bucket_histogram_aggregation._value[3], 1) histo = explicit_bucket_histogram_aggregation.collect( AggregationTemporality.CUMULATIVE, 1 From b834964be4cd06fb7478359d038dabc6f6e41f5a Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 25 Jun 2024 18:14:43 -0600 Subject: [PATCH 30/35] Fix equality tests --- .../test_exponential_bucket_histogram.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py index 545ee23b06e..0192fbbcc35 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py @@ -108,7 +108,11 @@ def test_synchronous_delta_temporality(self): ) self.assertEqual(metric_data.min, self.test_values[index + 1]) self.assertEqual(metric_data.max, self.test_values[index + 1]) - self.assertEqual(metric_data.sum, self.test_values[index + 1]) + # Using assertAlmostEqual here because in 3.12 resolution can cause + # these checks to fail. 
+ self.assertAlmostEqual( + metric_data.sum, self.test_values[index + 1] + ) results = [] @@ -200,7 +204,7 @@ def test_synchronous_cumulative_temporality(self): self.assertEqual( metric_data.max, max(self.test_values[: index + 2]) ) - self.assertEqual( + self.assertAlmostEqual( metric_data.sum, sum(self.test_values[: index + 2]) ) @@ -254,7 +258,7 @@ def test_synchronous_cumulative_temporality(self): ) self.assertEqual(metric_data.min, min(self.test_values)) self.assertEqual(metric_data.max, max(self.test_values)) - self.assertEqual(metric_data.sum, sum(self.test_values)) + self.assertAlmostEqual(metric_data.sum, sum(self.test_values)) previous_metric_data = metric_data @@ -272,7 +276,7 @@ def test_synchronous_cumulative_temporality(self): ) self.assertEqual(previous_metric_data.min, metric_data.min) self.assertEqual(previous_metric_data.max, metric_data.max) - self.assertEqual(previous_metric_data.sum, metric_data.sum) + self.assertAlmostEqual(previous_metric_data.sum, metric_data.sum) self.assertEqual( metric_data.positive.bucket_counts, From 06bca7e64d5852d091583503179c87fe253687b1 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Wed, 3 Jul 2024 10:33:38 -0600 Subject: [PATCH 31/35] Fix location of returns --- .../opentelemetry/sdk/metrics/_internal/aggregation.py | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index c93b93cd9c6..c2c0a965868 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -278,9 +278,6 @@ def collect( is AggregationTemporality.DELTA ): - if value is None: - return None - previous_collection_start_nano = ( self._previous_collection_start_nano ) @@ -288,7 +285,7 @@ def collect( collection_start_nano ) - if current_value is None: + if value is None: 
return None return NumberDataPoint( @@ -473,9 +470,6 @@ def collect( is AggregationTemporality.DELTA ): - if value is None: - return None - previous_collection_start_nano = ( self._previous_collection_start_nano ) @@ -483,7 +477,7 @@ def collect( collection_start_nano ) - if current_value is None: + if value is None: return None return HistogramDataPoint( From 2ef5abdcbf40b83c3601e80a94ac2e4370a3380c Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Tue, 9 Jul 2024 11:11:37 -0600 Subject: [PATCH 32/35] Fix another issue and add test case --- .../sdk/metrics/_internal/aggregation.py | 6 ++-- .../test_exponential_bucket_histogram.py | 35 +++++++++++++++++++ 2 files changed, 38 insertions(+), 3 deletions(-) diff --git a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py index c2c0a965868..62ac967bbec 100644 --- a/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py +++ b/opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py @@ -764,9 +764,6 @@ def collect( is AggregationTemporality.DELTA ): - if value_positive is None and value_negative is None: - return None - previous_collection_start_nano = ( self._previous_collection_start_nano ) @@ -774,6 +771,9 @@ def collect( collection_start_nano ) + if value_positive is None and value_negative is None: + return None + return ExponentialHistogramDataPoint( attributes=self._attributes, start_time_unix_nano=previous_collection_start_nano, diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py index 0192fbbcc35..aca732bf424 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py @@ -13,6 +13,7 @@ # limitations under the License. 
from platform import system +from time import sleep from unittest import TestCase from pytest import mark @@ -125,6 +126,40 @@ def test_synchronous_delta_temporality(self): for metrics_data in results: self.assertIsNone(metrics_data) + results = [] + + histogram.record(1) + results.append(reader.get_metrics_data()) + + sleep(0.1) + results.append(reader.get_metrics_data()) + + histogram.record(2) + results.append(reader.get_metrics_data()) + + metric_data_0 = ( + results[0] + .resource_metrics[0] + .scope_metrics[0] + .metrics[0] + .data.data_points[0] + ) + metric_data_2 = ( + results[2] + .resource_metrics[0] + .scope_metrics[0] + .metrics[0] + .data.data_points[0] + ) + + self.assertIsNone(results[1]) + + self.assertGreater( + metric_data_2.start_time_unix_nano, metric_data_0.time_unix_nano + ) + + provider.shutdown() + @mark.skipif( system() == "Windows", reason=( From 1aacf81896b72068714eceb4798839b0f5ab98c3 Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Thu, 11 Jul 2024 13:26:16 -0600 Subject: [PATCH 33/35] Added comments to integration test case --- .../test_exponential_bucket_histogram.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py index aca732bf424..7b283e617e7 100644 --- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py +++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py @@ -54,6 +54,8 @@ def test_synchronous_delta_temporality(self): histogram = meter.create_histogram("histogram") + # The test scenario here is calling collect without calling aggregate + # ever before. 
results = [] for _ in range(10): @@ -63,6 +65,7 @@ def test_synchronous_delta_temporality(self): for metrics_data in results: self.assertIsNone(metrics_data) + # The test scenario here is calling aggregate then collect repeatedly. results = [] for test_value in self.test_values: @@ -115,6 +118,9 @@ def test_synchronous_delta_temporality(self): metric_data.sum, self.test_values[index + 1] ) + # The test scenario here is calling collect without calling aggregate + # immediately before, but having aggregate being called before at some + # moment. results = [] for _ in range(10): @@ -126,6 +132,9 @@ def test_synchronous_delta_temporality(self): for metrics_data in results: self.assertIsNone(metrics_data) + # The test scenario here is calling aggregate and collect, waiting for + # a certain amount of time, calling collect, then calling aggregate and + # collect again. results = [] histogram.record(1) From 374fb3b77e69db0495b596a45fe3ad28ef4f61ad Mon Sep 17 00:00:00 2001 From: Diego Hurtado Date: Mon, 15 Jul 2024 11:19:27 -0600 Subject: [PATCH 34/35] Fix lint --- .../test_exponential_bucket_histogram_aggregation.py | 2 +- .../integration_test/test_exponential_bucket_histogram.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py index df31af4ece9..85c28070c15 100644 --- a/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py +++ b/opentelemetry-sdk/tests/metrics/exponential_histogram/test_exponential_bucket_histogram_aggregation.py @@ -15,11 +15,11 @@ # pylint: disable=protected-access,too-many-lines,invalid-name # pylint: disable=consider-using-enumerate,no-self-use,too-many-public-methods -from random import Random, randrange from inspect import currentframe from itertools import permutations from 
logging import WARNING
 from math import ldexp
+from random import Random, randrange
 from sys import float_info, maxsize
 from types import MethodType
 from unittest.mock import Mock, patch
diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py
index 7b283e617e7..b2e188fa880 100644
--- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py
+++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py
@@ -281,7 +281,7 @@ def test_synchronous_cumulative_temporality(self):
 
         results = []
 
-        for i in range(10):
+        for _ in range(10):
             results.append(reader.get_metrics_data())
 
         provider.shutdown()

From 295464113f2cd7aec4a55834e2c17558d37a29e2 Mon Sep 17 00:00:00 2001
From: Diego Hurtado
Date: Mon, 15 Jul 2024 14:50:07 -0600
Subject: [PATCH 35/35] Add documentation for test case

---
 .../integration_test/test_exponential_bucket_histogram.py | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py
index b2e188fa880..6574bf43357 100644
--- a/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py
+++ b/opentelemetry-sdk/tests/metrics/integration_test/test_exponential_bucket_histogram.py
@@ -41,6 +41,14 @@ class TestExponentialBucketHistogramAggregation(TestCase):
         ),
     )
     def test_synchronous_delta_temporality(self):
+        """
+        This test case instantiates an exponential histogram aggregation and
+        then uses it to record measurements and get metrics. The order in which
+        these actions are taken is relevant to the testing that happens here.
+        For this reason, the aggregation is only instantiated once, since the
+        reinstantiation of the aggregation would defeat the purpose of this
+        test case.
+ """ aggregation = ExponentialBucketHistogramAggregation()