
Commit 6604d85

WIP
1 parent a1271e2 commit 6604d85

File tree

3 files changed: +29 -33 lines

  • exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter
  • opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal


exporter/opentelemetry-exporter-otlp-proto-grpc/src/opentelemetry/exporter/otlp/proto/grpc/metric_exporter/__init__.py

Lines changed: 0 additions & 1 deletion

@@ -302,7 +302,6 @@ def _translate_data(
                     )

                 else:
-                    [data_point for data_point in metric.data.data_points]
                     _logger.warning(
                         "unsupported data type %s",
                         metric.data.__class__.__name__,
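
The deleted line evaluated a list comprehension and discarded the result, so removing it is pure dead-code elimination. A trivial sketch (hypothetical data, not the exporter's code) of why the bare statement has no effect:

    data_points = ["a", "b", "c"]

    # A bare list comprehension builds a list and immediately discards it;
    # it binds no name and mutates nothing.
    [data_point for data_point in data_points]

    print(data_points)  # ['a', 'b', 'c'] -- unchanged; the statement was a no-op
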

opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/aggregation.py

Lines changed: 23 additions & 22 deletions
@@ -364,13 +364,15 @@ def collect(

 # pylint: disable=protected-access
 class _ExponentialBucketHistogramAggregation(_Aggregation[HistogramPoint]):
-    # min_max_size is the smallest reasonable configuration, which is small
-    # enough to contain the entire normal floating point range at min
-    # scale.
+    # _min_max_size and _max_max_size are the smallest and largest values
+    # the max_size parameter may have, respectively.
+
+    # _min_max_size is the smallest reasonable value which is small enough
+    # to contain the entire normal floating point range at the minimum scale.
     _min_max_size = 2

-    # max_max_size is an arbitrary limit meant to limit accidental use of
-    # giant histograms.
+    # _max_max_size is an arbitrary limit meant to limit accidental creation of
+    # giant exponential bucket histograms.
     _max_max_size = 16384

     def __init__(
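
A quick check of the _min_max_size = 2 claim. The sketch below (mine, not from the commit) assumes the OpenTelemetry minimum scale of -10, where the bucket index of a value is floor(log2(value) * 2**scale); at that scale the entire normal float range lands in just two buckets:

    import math
    import sys

    SCALE = -10  # minimum scale in the exponential histogram spec

    def index(value: float) -> int:
        # For scale <= 0 the bucket index is floor(log2(value) * 2**scale).
        return math.floor(math.log2(value) * 2**SCALE)

    print(index(sys.float_info.min))  # -1 (smallest positive normal float)
    print(index(sys.float_info.max))  # 0  (largest finite float)
    # Only two distinct indexes, so a max_size below 2 is never reasonable.
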
@@ -384,9 +386,8 @@ def __init__(
         max_size: int = 160,
     ):
         super().__init__(attributes)
-        # maxSize is the maximum capacity of the positive and negative ranges.
-        # it is set by Init(), preserved by Copy and Move.)
-
+        # max_size is the maximum capacity of the positive and negative
+        # buckets.
         if max_size < self._min_max_size:
             raise Exception("size {max_size} is smaller than {min_max_size}")

@@ -395,32 +396,32 @@ def __init__(

         self._max_size = max_size

-        # _sum is the sum of all calls to aggregate reflected in the
-        # aggregator.
+        # _sum is the sum of all the values aggregated by this aggregator.
         self._sum = 0

-        # count is incremented by 1 per call to aggregate.
+        # _count is the count of all calls to aggregate.
         self._count = 0

-        # zero_count is incremented by 1 when the measured value is exactly 0.
+        # _zero_count is the count of all the calls to aggregate when the value
+        # to be aggregated is exactly 0.
         self._zero_count = 0

-        # _min is set when count > 0
-        self._min = 0
+        # _min is the smallest value aggregated by this aggregator.
+        self._min = inf

-        # _max is set when count > 0
-        self._max = 0
+        # _max is the largest value aggregated by this aggregator.
+        self._max = -inf

-        # _positive holds the positive values
+        # _positive holds the positive values.
         self._positive = Buckets()

-        # _negative holds the negative values by their absolute value
+        # _negative holds the negative values by their absolute value.
         self._negative = Buckets()

-        # _mapping corresponds to the current scale, is shared by both positive
-        # and negative ranges.
-
+        # _mapping corresponds to the current scale, is shared by both the
+        # positive and negative buckets.
         self._mapping = LogarithmMapping(LogarithmMapping._max_scale)
+
         self._instrument_temporality = AggregationTemporality.DELTA
         self._start_time_unix_nano = start_time_unix_nano
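
Seeding _min with inf and _max with -inf (rather than 0) lets the first aggregated value win both comparisons; starting from 0, a stream of strictly positive values would report a bogus minimum of 0. A minimal sketch of the pattern (mine, not the SDK's aggregate method):

    from math import inf

    class MinMaxTracker:
        def __init__(self):
            self._min = inf    # any real value is smaller than inf
            self._max = -inf   # any real value is larger than -inf

        def aggregate(self, value: float) -> None:
            self._min = min(self._min, value)
            self._max = max(self._max, value)

    tracker = MinMaxTracker()
    for value in (3.0, 7.0, 5.0):
        tracker.aggregate(value)
    print(tracker._min, tracker._max)  # 3.0 7.0 (a 0 seed would wrongly give min == 0)
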

@@ -763,7 +764,7 @@ def _grow(self, buckets: Buckets, needed: int):
         new_size = self._max_size

         new_positive_limit = new_size - bias
-        buckets._backing.grow_to(
+        buckets._backing.grow(
             new_size, old_positive_limit, new_positive_limit
         )

opentelemetry-sdk/src/opentelemetry/sdk/metrics/_internal/exponential_histogram/buckets.py

Lines changed: 6 additions & 10 deletions
@@ -15,16 +15,15 @@
 from abc import ABC, abstractmethod


-class BucketsBacking(ABC):
+class Backing(ABC):
     @abstractmethod
     def size(self) -> int:
         """
-        Returns the physical size of the backing array, which is
-        >= buckets.Len() the number allocated.
+        Returns the physical size of the backing array.
         """

     @abstractmethod
-    def grow_to(
+    def grow(
         self, new_size: int, old_positive_limit: int, new_positive_limit: int
     ) -> None:
         """
@@ -63,7 +62,7 @@ def reset(self) -> None:
         """


-class BucketsVarWidth(BucketsBacking):
+class VariableWidthBacking(Backing):
     def __init__(self):

         self._counts = [0]
@@ -75,16 +74,13 @@ def size(self) -> int:
         """
         return len(self._counts)

-    def grow_to(
+    def grow(
         self, new_size: int, old_positive_limit: int, new_positive_limit: int
     ) -> None:
         """
         Grows the backing array into a new size and copies old entries into
         their correct new positions.
         """
-        # FIXME this follows Go implementation maybe too closely. Since we
-        # don't need to request memory for a larger list, maybe this can be
-        # implemented in a more pythonical way.
         tmp = [0] * new_size
         tmp[new_positive_limit:] = self._counts[old_positive_limit:]
         tmp[0:old_positive_limit] = self._counts[0:old_positive_limit]
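
The backing list keeps the positive part at the front and the negative part anchored to the end, so growing must copy the tail block to the end of the enlarged list while the head stays put. A standalone walk-through of the two slice assignments (hypothetical sizes; bias is taken here as the length of the negative tail, consistent with new_positive_limit = new_size - bias in _grow above):

    counts = [1, 2, 3, 4]       # old backing: [positive part | negative part]
    old_positive_limit = 2      # entries 0..1 are the positive part
    new_size = 8
    bias = len(counts) - old_positive_limit   # 2 entries in the negative tail
    new_positive_limit = new_size - bias      # 6

    tmp = [0] * new_size
    tmp[new_positive_limit:] = counts[old_positive_limit:]    # tail moves to the new end
    tmp[0:old_positive_limit] = counts[0:old_positive_limit]  # head stays at the front

    print(tmp)  # [1, 2, 0, 0, 0, 0, 3, 4]
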
@@ -132,7 +128,7 @@ def reset(self) -> None:

 class Buckets:
     def __init__(self):
-        self._backing = BucketsVarWidth()
+        self._backing = VariableWidthBacking()

         # The term "index" refers to the number of the
         # histogram bucket used to determine its boundaries.
