
Commit 653f653

prometheusbp: add support for native histograms (#709)
* prometheusbp: add support for native histograms

  Add support for native histograms using the following settings:

  - NativeHistogramBucketFactor: 1.1
  - NativeHistogramMaxBucketNumber: 160
  - NativeHistogramMinResetDuration: 1h
1 parent 424face · commit 653f653

14 files changed: 216 additions & 211 deletions

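Throughout the diffs below, raw prometheus.HistogramOpts literals are replaced by a new prometheusbp.HistogramOpts converted via ToPrometheus. The file defining that wrapper is among the 14 changed files but does not appear in this excerpt. What follows is a minimal sketch of what it plausibly looks like, assuming the three settings from the commit message map directly onto client_golang's native-histogram fields and that the existing prometheusbp.DefaultLatencyBuckets remains the classic-bucket fallback when LegacyBuckets is unset; the field and method names are taken from the call sites below, everything else is reconstruction:

// Hypothetical reconstruction of the prometheusbp wrapper; not the actual
// file from this commit.
package prometheusbp

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

// HistogramOpts mirrors the subset of prometheus.HistogramOpts used in
// baseplate.go, with classic buckets renamed to LegacyBuckets.
type HistogramOpts struct {
	Name string
	Help string

	// LegacyBuckets defines the buckets of the classic histogram that is
	// exposed alongside the native one.
	LegacyBuckets []float64
}

// ToPrometheus converts to prometheus.HistogramOpts, enabling native
// histograms with the settings listed in the commit message.
func (o HistogramOpts) ToPrometheus() prometheus.HistogramOpts {
	buckets := o.LegacyBuckets
	if len(buckets) == 0 {
		// Assumption: latency histograms that dropped their explicit
		// Buckets setting in this commit fall back to the package-wide
		// default latency buckets.
		buckets = DefaultLatencyBuckets
	}
	return prometheus.HistogramOpts{
		Name:    o.Name,
		Help:    o.Help,
		Buckets: buckets,

		// Native histogram settings from the commit message; these fields
		// require a client_golang version with native-histogram support.
		NativeHistogramBucketFactor:     1.1,
		NativeHistogramMaxBucketNumber:  160,
		NativeHistogramMinResetDuration: time.Hour,
	}
}

Independent of the histogram change, the diffs also fold Namespace and Subsystem into the Name field (for example kafkabp + consumer + rebalances_total becomes kafkabp_consumer_rebalances_total); since Prometheus joins the three parts with underscores, the exposed metric names are unchanged.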

ecinterface/global.go

Lines changed: 1 addition & 6 deletions
@@ -12,14 +12,9 @@ import (
 	"github.com/reddit/baseplate.go/log"
 )
 
-const (
-	promNamespace = "ecinterface"
-)
-
 var (
 	getBeforeSet = promauto.With(prometheusbpint.GlobalRegistry).NewCounter(prometheus.CounterOpts{
-		Namespace: promNamespace,
-		Name: "get_before_set_total",
+		Name: "ecinterface_get_before_set_total",
 		Help: "Total number of ecinterface.Get calls before Set is called",
 	})
 )

grpcbp/prometheus.go

Lines changed: 8 additions & 10 deletions
@@ -33,11 +33,10 @@ var (
 		successLabel,
 	}
 
-	serverLatencyDistribution = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "grpc_server_latency_seconds",
-		Help: "RPC latencies",
-		Buckets: prometheusbp.DefaultLatencyBuckets,
-	}, serverLatencyLabels)
+	serverLatencyDistribution = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "grpc_server_latency_seconds",
+		Help: "RPC latencies",
+	}.ToPrometheus(), serverLatencyLabels)
 
 	serverTotalRequestLabels = []string{
 		serviceLabel,
@@ -73,11 +72,10 @@ var (
 		clientNameLabel,
 	}
 
-	clientLatencyDistribution = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "grpc_client_latency_seconds",
-		Help: "RPC latencies",
-		Buckets: prometheusbp.DefaultLatencyBuckets,
-	}, clientLatencyLabels)
+	clientLatencyDistribution = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "grpc_client_latency_seconds",
+		Help: "RPC latencies",
+	}.ToPrometheus(), clientLatencyLabels)
 
 	clientTotalRequestLabels = []string{
 		serviceLabel,

headerbp/metrics.go

Lines changed: 16 additions & 15 deletions
@@ -5,6 +5,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus/promauto"
 
 	"github.com/reddit/baseplate.go/internal/prometheusbpint"
+	"github.com/reddit/baseplate.go/prometheusbp"
 )
 
 const (
@@ -37,22 +38,22 @@ var (
 		headerNameLabel,
 	})
 
-	clientHeadersSentTotal = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "baseplate_client_sent_headers_total",
-		Help: "Total number of internal headers that were automatically sent by a client",
-		Buckets: []float64{1, 4, 8, 16, 32, 64, 128},
-	}, []string{
+	clientHeadersSentTotal = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "baseplate_client_sent_headers_total",
+		Help: "Total number of internal headers that were automatically sent by a client",
+		LegacyBuckets: []float64{1, 4, 8, 16, 32, 64, 128},
+	}.ToPrometheus(), []string{
 		rpcTypeLabel,
 		serviceLabel,
 		clientNameLabel,
 		clientMethodLabel,
 	})
 
-	clientHeadersSentSize = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "baseplate_client_headers_sent_size_bytes",
-		Help: "Estimated size (in bytes) of internal headers that were automatically sent by a client",
-		Buckets: []float64{1, 64, 128, 256, 512, 1024, 2048, 4096},
-	}, []string{
+	clientHeadersSentSize = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "baseplate_client_headers_sent_size_bytes",
+		Help: "Estimated size (in bytes) of internal headers that were automatically sent by a client",
+		LegacyBuckets: []float64{1, 64, 128, 256, 512, 1024, 2048, 4096},
+	}.ToPrometheus(), []string{
 		rpcTypeLabel,
 		serviceLabel,
 		clientNameLabel,
@@ -69,11 +70,11 @@ var (
 		headerNameLabel,
 	})
 
-	serverHeadersReceivedSize = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "baseplate_server_headers_received_size_bytes",
-		Help: "Estimated size (in bytes) of internal headers that were automatically extracted by a server",
-		Buckets: []float64{1, 64, 128, 256, 512, 1024, 2048, 4096},
-	}, []string{
+	serverHeadersReceivedSize = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "baseplate_server_headers_received_size_bytes",
+		Help: "Estimated size (in bytes) of internal headers that were automatically extracted by a server",
+		LegacyBuckets: []float64{1, 64, 128, 256, 512, 1024, 2048, 4096},
+	}.ToPrometheus(), []string{
 		rpcTypeLabel,
 		serviceLabel,
 		serverMethodLabel,

httpbp/prometheus.go

Lines changed: 30 additions & 34 deletions
@@ -39,35 +39,32 @@ var (
 		endpointLabel,
 	}
 
-	serverLatency = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "http_server_latency_seconds",
-		Help: "HTTP server request latencies",
-		Buckets: prometheusbp.DefaultLatencyBuckets,
-	}, serverLabels)
-
-	serverRequestSize = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "http_server_request_size_bytes",
-		Help: "Request size",
-		Buckets: payloadSizeBuckets,
-	}, serverLabels)
-
-	serverResponseSize = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "http_server_response_size_bytes",
-		Help: "Response size",
-		Buckets: payloadSizeBuckets,
-	}, serverLabels)
-
-	serverTimeToWriteHeader = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "http_server_time_to_write_header_seconds",
-		Help: "Request size",
-		Buckets: prometheusbp.DefaultLatencyBuckets,
-	}, serverLabels)
-
-	serverTimeToFirstByte = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "http_server_time_to_first_byte_seconds",
-		Help: "Time elapsed before first byte was sent",
-		Buckets: prometheusbp.DefaultLatencyBuckets,
-	}, serverLabels)
+	serverLatency = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "http_server_latency_seconds",
+		Help: "HTTP server request latencies",
+	}.ToPrometheus(), serverLabels)
+
+	serverRequestSize = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "http_server_request_size_bytes",
+		Help: "Request size",
+		LegacyBuckets: payloadSizeBuckets,
+	}.ToPrometheus(), serverLabels)
+
+	serverResponseSize = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "http_server_response_size_bytes",
+		Help: "Response size",
+		LegacyBuckets: payloadSizeBuckets,
+	}.ToPrometheus(), serverLabels)
+
+	serverTimeToWriteHeader = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "http_server_time_to_write_header_seconds",
+		Help: "Request size",
+	}.ToPrometheus(), serverLabels)
+
+	serverTimeToFirstByte = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "http_server_time_to_first_byte_seconds",
+		Help: "Time elapsed before first byte was sent",
+	}.ToPrometheus(), serverLabels)
 
 	serverTotalRequestLabels = []string{
 		methodLabel,
@@ -100,11 +97,10 @@ var (
 		clientNameLabel,
 	}
 
-	clientLatencyDistribution = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Name: "http_client_latency_seconds",
-		Help: "HTTP client request latencies",
-		Buckets: prometheusbp.DefaultLatencyBuckets,
-	}, clientLatencyLabels)
+	clientLatencyDistribution = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "http_client_latency_seconds",
+		Help: "HTTP client request latencies",
+	}.ToPrometheus(), clientLatencyLabels)
 
 	clientTotalRequestLabels = []string{
 		methodLabel,

internal/limitopen/limitopen.go

Lines changed: 4 additions & 8 deletions
@@ -15,8 +15,6 @@ import (
 )
 
 const (
-	promNamespace = "limitopen"
-
 	pathLabel = "path"
 )
 
@@ -26,15 +24,13 @@ var (
 	}
 
 	sizeGauge = promauto.With(prometheusbpint.GlobalRegistry).NewGaugeVec(prometheus.GaugeOpts{
-		Namespace: promNamespace,
-		Name: "file_size_bytes",
-		Help: "The size of the file opened by limitopen.Open",
+		Name: "limitopen_file_size_bytes",
+		Help: "The size of the file opened by limitopen.Open",
 	}, sizeLabels)
 
 	softLimitCounter = promauto.With(prometheusbpint.GlobalRegistry).NewCounterVec(prometheus.CounterOpts{
-		Namespace: promNamespace,
-		Name: "softlimit_violation_total",
-		Help: "The total number of violations of softlimit",
+		Name: "limitopen_softlimit_violation_total",
+		Help: "The total number of violations of softlimit",
 	}, sizeLabels)
 )

kafkabp/prometheus.go

Lines changed: 17 additions & 35 deletions
@@ -9,27 +9,17 @@ import (
 )
 
 const (
-	promNamespace = "kafkabp"
-
-	subsystemConsumer = "consumer"
-	subsystemGroupConsumer = "group_consumer"
-
-	successLabel = "kafka_success"
-	topicLabel = "kafka_topic"
+	topicLabel = "kafka_topic"
 )
 
 var (
 	rebalanceTotalCounter = promauto.With(prometheusbpint.GlobalRegistry).NewCounter(prometheus.CounterOpts{
-		Namespace: promNamespace,
-		Subsystem: subsystemConsumer,
-		Name: "rebalances_total",
-		Help: "The number of times consumer rebalance happened",
+		Name: "kafkabp_consumer_rebalances_total",
+		Help: "The number of times consumer rebalance happened",
 	})
 	rebalanceFailureCounter = promauto.With(prometheusbpint.GlobalRegistry).NewCounter(prometheus.CounterOpts{
-		Namespace: promNamespace,
-		Subsystem: subsystemConsumer,
-		Name: "rebalance_failures_total",
-		Help: "The number of times consumer rebalance failed",
+		Name: "kafkabp_consumer_rebalance_failures_total",
+		Help: "The number of times consumer rebalance failed",
 	})
 )
 
@@ -38,34 +28,26 @@ var (
 		topicLabel,
 	}
 
-	consumerTimer = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Namespace: promNamespace,
-		Subsystem: subsystemConsumer,
-		Name: "duration_seconds",
-		Help: "The time took for a non-group consumer to consume a single kafka message",
-		Buckets: prometheusbp.DefaultLatencyBuckets,
-	}, timerLabels)
+	consumerTimer = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "kafkabp_consumer_duration_seconds",
+		Help: "The time took for a non-group consumer to consume a single kafka message",
+	}.ToPrometheus(), timerLabels)
 
-	groupConsumerTimer = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheus.HistogramOpts{
-		Namespace: promNamespace,
-		Subsystem: subsystemGroupConsumer,
-		Name: "duration_seconds",
-		Help: "The time took for a group consumer to consume a single kafka message",
-		Buckets: prometheusbp.DefaultLatencyBuckets,
-	}, timerLabels)
+	groupConsumerTimer = promauto.With(prometheusbpint.GlobalRegistry).NewHistogramVec(prometheusbp.HistogramOpts{
+		Name: "kafkabp_group_consumer_duration_seconds",
+		Help: "The time took for a group consumer to consume a single kafka message",
+	}.ToPrometheus(), timerLabels)
 )
 
 var (
 	awsRackFailure = promauto.With(prometheusbpint.GlobalRegistry).NewCounter(prometheus.CounterOpts{
-		Namespace: promNamespace,
-		Name: "aws_rack_id_failures_total",
-		Help: "Total failures of getting rack id from AWS endpoint",
+		Name: "kafkabp_aws_rack_id_failures_total",
+		Help: "Total failures of getting rack id from AWS endpoint",
 	})
 
 	httpRackFailure = promauto.With(prometheusbpint.GlobalRegistry).NewCounter(prometheus.CounterOpts{
-		Namespace: promNamespace,
-		Name: "http_rack_id_failures_total",
-		Help: "Total failures of getting rack id from http endpoint",
+		Name: "kafkabp_http_rack_id_failures_total",
+		Help: "Total failures of getting rack id from http endpoint",
 	})
 )

log/core_wrapper.go

Lines changed: 6 additions & 6 deletions
@@ -6,18 +6,18 @@ import (
 
 	"go.uber.org/zap/zapcore"
 
-	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/reddit/baseplate.go/internal/prometheusbpint"
 	"github.com/reddit/baseplate.go/internal/thriftint"
+	"github.com/reddit/baseplate.go/prometheusbp"
 )
 
 var (
 	logWriteDurationSeconds = promauto.With(prometheusbpint.GlobalRegistry).NewHistogram(
-		prometheus.HistogramOpts{
-			Name: "baseplate_log_write_duration_seconds",
-			Help: "Latency of log writes",
-			Buckets: []float64{
+		prometheusbp.HistogramOpts{
+			Name: "baseplate_log_write_duration_seconds",
+			Help: "Latency of log writes",
+			LegacyBuckets: []float64{
 				0.000_005,
 				0.000_010,
 				0.000_050,
@@ -31,7 +31,7 @@ var (
 				0.5,
 				1.0,
 			},
-		},
+		}.ToPrometheus(),
 	)
 )

log/wrapper.go

Lines changed: 2 additions & 4 deletions
@@ -306,10 +306,8 @@ type Counter interface {
 //
 //	// a global variable
 //	var tracingFailures = promauto.NewCounter(prometheus.CounterOpts{
-//		Namespace: "myservice",
-//		Subsystem: "tracing",
-//		Name: "failures_total",
-//		Help: "Total number of failures when sending tracing spans to the sidecar",
+//		Name: "myservice_tracing_failures_total",
+//		Help: "Total number of failures when sending tracing spans to the sidecar",
 //	})
 //
 //	// in main
