// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0

package internal

import (
	"context"
	"fmt"
	"testing"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/config"
	"github.com/prometheus/prometheus/model/histogram"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/scrape"
	"github.com/prometheus/prometheus/tsdb/tsdbutil"
	"github.com/stretchr/testify/assert"
	"go.opentelemetry.io/collector/component"
	"go.opentelemetry.io/collector/consumer/consumertest"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/receiver/receiverhelper"
	"go.opentelemetry.io/collector/receiver/receivertest"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/translator/prometheus"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/prometheusreceiver/internal/metadata"
)

const (
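	// numSeries controls how many unique series are appended to each
	// transaction per benchmark iteration.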
	numSeries = 10000
)

var (
	benchTarget = scrape.NewTarget(
		labels.FromMap(map[string]string{
			model.InstanceLabel: "localhost:8080",
			model.JobLabel:      "benchmark",
		}),
		&config.ScrapeConfig{},
		map[model.LabelName]model.LabelValue{
			model.AddressLabel: "localhost:8080",
			model.SchemeLabel:  "http",
		},
		nil,
	)

	benchCtx = scrape.ContextWithTarget(context.Background(), benchTarget)
)
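
// The benchmarks below can be run from this package's directory with, for
// example:
//
//	go test -bench 'BenchmarkAppend|BenchmarkAppendHistogram|BenchmarkCommit' -benchmem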

// BenchmarkAppend benchmarks the Append method of the transaction.
// It measures the cost of appending float samples, the path taken by the
// classic metric types (counters, gauges, summaries, classic histograms).
func BenchmarkAppend(b *testing.B) {
	benchmarkAppend(b)
}

func benchmarkAppend(b *testing.B) {
	labelSets := generateLabelSets(numSeries, 50)
	timestamp := int64(1234567890)

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		b.StopTimer()
		tx := newBenchmarkTransaction(b)
		b.StartTimer()

		for j, ls := range labelSets {
			value := float64(j)
			// Use a plain error check: testify assertions inside the timed
			// loop would add per-call overhead to the measurement.
			if _, err := tx.Append(0, ls, timestamp, value); err != nil {
				b.Fatal(err)
			}
		}
	}
}

// BenchmarkAppendHistogram benchmarks the AppendHistogram method of the transaction.
// It measures the cost of appending native histogram samples.
func BenchmarkAppendHistogram(b *testing.B) {
	benchmarkAppendHistogram(b)
}

func benchmarkAppendHistogram(b *testing.B) {
	labelSets := generateLabelSets(numSeries, 50)
	histograms := generateNativeHistograms(numSeries)
	timestamp := int64(1234567890)

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		b.StopTimer()
		tx := newBenchmarkTransaction(b)
		tx.enableNativeHistograms = true
		b.StartTimer()

		for j := range labelSets {
			// As in benchmarkAppend, keep assertion overhead out of the timed loop.
			if _, err := tx.AppendHistogram(0, labelSets[j], timestamp, histograms[j], nil); err != nil {
				b.Fatal(err)
			}
		}
	}
}

// BenchmarkCommit benchmarks the Commit method, which converts the accumulated
// metrics to pdata (pmetric) format and delivers them to the consumer. It is
// kept separate from Append/AppendHistogram so that conversion and delivery
// overhead can be measured independently.
// Note: the presence of target_info and otel_scope_info metrics affects the
// cost of Commit, so each combination is measured in its own sub-benchmark.
func BenchmarkCommit(b *testing.B) {
	b.Run("ClassicMetrics", func(b *testing.B) {
		b.Run("Baseline", func(b *testing.B) {
			benchmarkCommit(b, false, false, false)
		})

		b.Run("WithTargetInfo", func(b *testing.B) {
			benchmarkCommit(b, false, true, false)
		})

		b.Run("WithScopeInfo", func(b *testing.B) {
			benchmarkCommit(b, false, false, true)
		})
	})

	b.Run("NativeHistogram", func(b *testing.B) {
		b.Run("Baseline", func(b *testing.B) {
			benchmarkCommit(b, true, false, false)
		})

		b.Run("WithTargetInfo", func(b *testing.B) {
			benchmarkCommit(b, true, true, false)
		})

		b.Run("WithScopeInfo", func(b *testing.B) {
			benchmarkCommit(b, true, false, true)
		})
	})
}
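
// A sketch of how the sub-benchmark results might be compared, assuming the
// benchstat tool (golang.org/x/perf/cmd/benchstat) is installed:
//
//	go test -bench BenchmarkCommit -benchmem -count 10 > commit.txt
//	benchstat commit.txt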

func benchmarkCommit(b *testing.B, useNativeHistograms, withTargetInfo, withScopeInfo bool) {
	labelSets := generateLabelSets(numSeries, 50)
	var histograms []*histogram.Histogram
	if useNativeHistograms {
		histograms = generateNativeHistograms(numSeries)
	}
	timestamp := int64(1234567890)

	b.ResetTimer()
	b.ReportAllocs()

	for i := 0; i < b.N; i++ {
		// Setup: Create transaction and append all data (not timed)
		b.StopTimer()
		tx := newBenchmarkTransaction(b)
		if useNativeHistograms {
			tx.enableNativeHistograms = true
		}

		if withTargetInfo {
			targetInfoLabels := createTargetInfoLabels()
			_, _ = tx.Append(0, targetInfoLabels, timestamp, 1)
		}

		if withScopeInfo {
			scopeInfoLabels := createScopeInfoLabels()
			_, _ = tx.Append(0, scopeInfoLabels, timestamp, 1)
		}

		if useNativeHistograms {
			for j := range labelSets {
				_, _ = tx.AppendHistogram(0, labelSets[j], timestamp, histograms[j], nil)
			}
		} else {
			for j, ls := range labelSets {
				_, _ = tx.Append(0, ls, timestamp, float64(j))
			}
		}
		b.StartTimer()

		// Benchmark: Only measure Commit
		err := tx.Commit()
		assert.NoError(b, err)
	}
}

// newBenchmarkTransaction creates a new transaction configured for benchmarking.
// It uses an in-memory metrics sink and minimal configuration to isolate
// transaction performance from downstream consumers.
func newBenchmarkTransaction(b *testing.B) *transaction {
	b.Helper()

	sink := new(consumertest.MetricsSink)
	settings := receivertest.NewNopSettings(metadata.Type)
	adjuster := &noOpAdjuster{}
	obsrecv, err := receiverhelper.NewObsReport(receiverhelper.ObsReportSettings{
		ReceiverID:             component.MustNewID("prometheus"),
		Transport:              "http",
		ReceiverCreateSettings: settings,
	})
	if err != nil {
		b.Fatalf("Failed to create ObsReport: %v", err)
	}

	tx := newTransaction(
		benchCtx,
		adjuster,
		sink,
		labels.EmptyLabels(), // no external labels
		settings,
		obsrecv,
		false, // trimSuffixes
		false, // enableNativeHistograms (histogram benchmarks override this via tx.enableNativeHistograms)
		false, // useMetadata
	)

	// Set a mock MetricMetadataStore to avoid nil pointer dereferences during
	// metadata lookups.
	tx.mc = &mockMetadataStore{}

	return tx
}

// generateLabelSets creates seriesCount label sets, each with a unique metric
// name and the given number of additional labels.
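// For illustration, each generated set has the shape:
//
//	{__name__="metric_<i>", label_0="value_<i>_0", ..., label_<n-1>="value_<i>_<n-1>"}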
func generateLabelSets(seriesCount, labelsPerSeries int) []labels.Labels {
	result := make([]labels.Labels, seriesCount)

	for i := 0; i < seriesCount; i++ {
		lbls := labels.NewBuilder(labels.EmptyLabels())
		lbls.Set(model.MetricNameLabel, fmt.Sprintf("metric_%d", i))

		for j := 0; j < labelsPerSeries; j++ {
			lbls.Set(fmt.Sprintf("label_%d", j), fmt.Sprintf("value_%d_%d", i, j))
		}

		result[i] = lbls.Labels()
	}

	return result
}

// generateNativeHistograms creates native histogram instances for benchmarking.
// It uses Prometheus's test histogram generator for realistic histogram structures.
func generateNativeHistograms(count int) []*histogram.Histogram {
	result := make([]*histogram.Histogram, count)

	for i := 0; i < count; i++ {
		// tsdbutil.GenerateTestHistogram creates a realistic native histogram;
		// the parameter seeds the generated counts so the histograms vary slightly.
		result[i] = tsdbutil.GenerateTestHistogram(int64(i))
	}

	return result
}

// createTargetInfoLabels creates labels for a target_info metric.
// The target_info metric is used to add resource attributes to metrics.
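// During conversion, its labels are expected to be folded into the resource
// attributes of the resulting pdata metrics rather than emitted as a regular
// metric, which is why it influences the cost of Commit.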
func createTargetInfoLabels() labels.Labels {
	return labels.FromMap(map[string]string{
		model.MetricNameLabel: prometheus.TargetInfoMetricName,
		model.JobLabel:        "benchmark",
		model.InstanceLabel:   "localhost:8080",
		"environment":         "test",
		"region":              "us-west-2",
		"cluster":             "benchmark-cluster",
	})
}

// createScopeInfoLabels creates labels for an otel_scope_info metric.
// The otel_scope_info metric is used to add scope-level attributes.
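// Analogously to target_info, its labels are expected to end up as attributes
// on the instrumentation scope rather than as a regular metric.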
func createScopeInfoLabels() labels.Labels {
	return labels.FromMap(map[string]string{
		model.MetricNameLabel:           prometheus.ScopeInfoMetricName,
		model.JobLabel:                  "benchmark",
		model.InstanceLabel:             "localhost:8080",
		prometheus.ScopeNameLabelKey:    "benchmark.scope",
		prometheus.ScopeVersionLabelKey: "1.0.0",
		"scope_attribute":               "test_value",
	})
}

// noOpAdjuster is a MetricsAdjuster that doesn't modify metrics.
// This isolates the transaction performance from adjustment overhead.
type noOpAdjuster struct{}

func (*noOpAdjuster) AdjustMetrics(_ pmetric.Metrics) error {
	return nil
}

// mockMetadataStore is a minimal implementation of scrape.MetricMetadataStore
// that reports no metadata.
type mockMetadataStore struct{}

func (*mockMetadataStore) ListMetadata() []scrape.MetricMetadata {
	return nil
}

func (*mockMetadataStore) GetMetadata(_ string) (scrape.MetricMetadata, bool) {
	return scrape.MetricMetadata{}, false
}

func (*mockMetadataStore) SizeMetadata() int {
	return 0
}

func (*mockMetadataStore) LengthMetadata() int {
	return 0
}