diff --git a/api/flowcollector/v1beta2/flowcollector_types.go b/api/flowcollector/v1beta2/flowcollector_types.go
index 16582d616..cf986e34a 100644
--- a/api/flowcollector/v1beta2/flowcollector_types.go
+++ b/api/flowcollector/v1beta2/flowcollector_types.go
@@ -70,14 +70,14 @@ type FlowCollectorSpec struct {
ConsolePlugin FlowCollectorConsolePlugin `json:"consolePlugin,omitempty"`
// `deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- // - `Direct` (default) to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
- // - `Service` to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
+ // - `Service` (default) to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
// - `Kafka` to make flows sent to a Kafka pipeline before consumption by the processor.
+ // - `Direct` to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
// Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
// `Direct` is not recommended on large clusters as it is less memory efficient.
// +unionDiscriminator
- // +kubebuilder:validation:Enum:="Direct";"Service";"Kafka"
- // +kubebuilder:default:=Direct
+ // +kubebuilder:validation:Enum:="Service";"Direct";"Kafka"
+ // +kubebuilder:default:=Service
DeploymentModel FlowCollectorDeploymentModel `json:"deploymentModel,omitempty"`
// Kafka configuration, allowing to use Kafka as a broker as part of the flow collection pipeline. Available when the `spec.deploymentModel` is `Kafka`.
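With `Service` as the new default, a cluster that wants the previous host-network behavior has to set `Direct` explicitly. A minimal sketch, assuming the operator's module path and a `DeploymentModelDirect` constant named by analogy with `DeploymentModelService` used later in this diff:

    package example

    import (
        flowslatest "github.com/netobserv/network-observability-operator/api/flowcollector/v1beta2"
    )

    // directSpec keeps the pre-change behavior: the host-network DaemonSet
    // processor is no longer the default and must be selected explicitly.
    func directSpec() flowslatest.FlowCollectorSpec {
        return flowslatest.FlowCollectorSpec{
            DeploymentModel: flowslatest.DeploymentModelDirect,
        }
    }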
diff --git a/api/flowcollector/v1beta2/helper.go b/api/flowcollector/v1beta2/helper.go
index 190d33ff5..a8c5e6d6f 100644
--- a/api/flowcollector/v1beta2/helper.go
+++ b/api/flowcollector/v1beta2/helper.go
@@ -55,10 +55,7 @@ func (spec *FlowCollectorSpec) UseConsolePlugin() bool {
func (spec *FlowCollectorSpec) UseTestConsolePlugin() bool {
if spec.ConsolePlugin.Advanced != nil {
- env := spec.ConsolePlugin.Advanced.Env[constants.EnvTestConsole]
- // Use ParseBool to allow common variants ("true", "True", "1"...) and ignore non-bools
- b, err := strconv.ParseBool(env)
- return err == nil && b
+ return IsEnvEnabled(spec.ConsolePlugin.Advanced.Env, constants.EnvTestConsole)
}
return false
}
@@ -185,10 +182,7 @@ func (spec *FlowCollectorFLP) GetMetricsPort() int32 {
func (spec *FlowCollectorSpec) HasExperimentalAlertsHealth() bool {
if spec.Processor.Advanced != nil {
- env := spec.Processor.Advanced.Env["EXPERIMENTAL_ALERTS_HEALTH"]
- // Use ParseBool to allow common variants ("true", "True", "1"...) and ignore non-bools
- b, err := strconv.ParseBool(env)
- return err == nil && b
+ return IsEnvEnabled(spec.Processor.Advanced.Env, "EXPERIMENTAL_ALERTS_HEALTH")
}
return false
}
@@ -227,3 +221,10 @@ func (spec *FlowCollectorConsolePlugin) IsUnmanagedConsolePluginReplicas() bool
}
return spec.Autoscaler.IsHPAEnabled()
}
+
+func IsEnvEnabled(vars map[string]string, key string) bool {
+ env := vars[key]
+ // Use ParseBool to allow common variants ("true", "True", "1"...) and ignore non-bools
+ b, err := strconv.ParseBool(env)
+ return err == nil && b
+}
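A rough usage sketch of the new helper (module path assumed, keys and values illustrative), relying only on `strconv.ParseBool` semantics:

    package example

    import flowslatest "github.com/netobserv/network-observability-operator/api/flowcollector/v1beta2"

    func exampleEnvChecks() []bool {
        env := map[string]string{
            "EXPERIMENTAL_ALERTS_HEALTH": "True", // ParseBool accepts "true", "True", "1", ...
            "SERVER_NOTLS":               "no",   // not a valid bool, so treated as disabled
        }
        return []bool{
            flowslatest.IsEnvEnabled(env, "EXPERIMENTAL_ALERTS_HEALTH"), // true
            flowslatest.IsEnvEnabled(env, "SERVER_NOTLS"),               // false
            flowslatest.IsEnvEnabled(env, "MISSING"),                    // false: absent key reads as ""
        }
    }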
diff --git a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
index b741295ad..068343834 100644
--- a/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
+++ b/bundle/manifests/flows.netobserv.io_flowcollectors.yaml
@@ -3217,17 +3217,17 @@ spec:
type: boolean
type: object
deploymentModel:
- default: Direct
+ default: Service
description: |-
`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- - `Direct` (default) to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
- - `Service` to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
+ - `Service` (default) to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
- `Kafka` to make flows sent to a Kafka pipeline before consumption by the processor.
+ - `Direct` to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
`Direct` is not recommended on large clusters as it is less memory efficient.
enum:
- - Direct
- Service
+ - Direct
- Kafka
type: string
exporters:
diff --git a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
index 3addd7bc5..7cb8ad17e 100644
--- a/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
+++ b/config/crd/bases/flows.netobserv.io_flowcollectors.yaml
@@ -3011,17 +3011,17 @@ spec:
type: boolean
type: object
deploymentModel:
- default: Direct
+ default: Service
description: |-
`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- - `Direct` (default) to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
- - `Service` to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
+ - `Service` (default) to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
- `Kafka` to make flows sent to a Kafka pipeline before consumption by the processor.
+ - `Direct` to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
`Direct` is not recommended on large clusters as it is less memory efficient.
enum:
- - Direct
- Service
+ - Direct
- Kafka
type: string
exporters:
diff --git a/docs/FlowCollector.md b/docs/FlowCollector.md
index 1abc9937f..0b6a9002c 100644
--- a/docs/FlowCollector.md
+++ b/docs/FlowCollector.md
@@ -112,14 +112,14 @@ for these features as a best effort only.
enum |
`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
-- `Direct` (default) to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
-- `Service` to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
+- `Service` (default) to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
- `Kafka` to make flows sent to a Kafka pipeline before consumption by the processor.
+- `Direct` to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
`Direct` is not recommended on large clusters as it is less memory efficient.
- Enum: Direct, Service, Kafka
- Default: Direct
+ Enum: Service, Direct, Kafka
+ Default: Service
|
false |
diff --git a/go.mod b/go.mod
index 726a671d9..309f4d6e0 100644
--- a/go.mod
+++ b/go.mod
@@ -9,8 +9,8 @@ require (
github.com/coreos/go-semver v0.3.1
github.com/google/go-cmp v0.7.0
github.com/grafana/loki/operator/apis/loki v0.0.0-20241021105923-5e970e50b166
- github.com/netobserv/flowlogs-pipeline v1.10.0-community
- github.com/netobserv/netobserv-ebpf-agent v1.10.0-community
+ github.com/netobserv/flowlogs-pipeline v1.10.0-community.0.20251205170812-75a990e42a64
+ github.com/netobserv/netobserv-ebpf-agent v1.10.0-community.0.20251125162210-4be10c36721e
github.com/onsi/ginkgo/v2 v2.27.2
github.com/onsi/gomega v1.38.2
github.com/openshift/api v0.0.0-20250707164913-2cd5821c9080
@@ -80,32 +80,32 @@ require (
github.com/stoewer/go-strcase v1.3.1 // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/x448/float16 v0.8.4 // indirect
- go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 // indirect
- go.opentelemetry.io/otel v1.37.0 // indirect
+ go.opentelemetry.io/otel v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 // indirect
- go.opentelemetry.io/otel/metric v1.37.0 // indirect
- go.opentelemetry.io/otel/sdk v1.37.0 // indirect
- go.opentelemetry.io/otel/trace v1.37.0 // indirect
+ go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.38.0 // indirect
+ go.opentelemetry.io/otel/trace v1.38.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b // indirect
- golang.org/x/mod v0.28.0 // indirect
- golang.org/x/net v0.46.0 // indirect
+ golang.org/x/mod v0.29.0 // indirect
+ golang.org/x/net v0.47.0 // indirect
golang.org/x/oauth2 v0.32.0 // indirect
- golang.org/x/sync v0.17.0 // indirect
- golang.org/x/sys v0.37.0 // indirect
- golang.org/x/term v0.36.0 // indirect
- golang.org/x/text v0.30.0 // indirect
+ golang.org/x/sync v0.18.0 // indirect
+ golang.org/x/sys v0.38.0 // indirect
+ golang.org/x/term v0.37.0 // indirect
+ golang.org/x/text v0.31.0 // indirect
golang.org/x/time v0.12.0 // indirect
- golang.org/x/tools v0.37.0 // indirect
+ golang.org/x/tools v0.38.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.5.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b // indirect
- google.golang.org/grpc v1.76.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 // indirect
+ google.golang.org/grpc v1.77.0 // indirect
google.golang.org/protobuf v1.36.10 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
diff --git a/go.sum b/go.sum
index 81cfe4db4..a03523ae1 100644
--- a/go.sum
+++ b/go.sum
@@ -14,8 +14,8 @@ github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7
github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cilium/ebpf v0.19.0 h1:Ro/rE64RmFBeA9FGjcTc+KmCeY6jXmryu6FfnzPRIao=
-github.com/cilium/ebpf v0.19.0/go.mod h1:fLCgMo3l8tZmAdM3B2XqdFzXBpwkcSTroaVqN08OWVY=
+github.com/cilium/ebpf v0.20.0 h1:atwWj9d3NffHyPZzVlx3hmw1on5CLe9eljR8VuHTwhM=
+github.com/cilium/ebpf v0.20.0/go.mod h1:pzLjFymM+uZPLk/IXZUL63xdx5VXEo+enTzxkZXdycw=
github.com/coreos/go-semver v0.3.1 h1:yi21YpKnrx1gt5R+la8n5WgS0kCrsPp33dmEyHReZr4=
github.com/coreos/go-semver v0.3.1/go.mod h1:irMmmIw/7yzSRPWryHsK7EYSg09caPQL03VsM8rvUec=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -121,10 +121,10 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
-github.com/netobserv/flowlogs-pipeline v1.10.0-community h1:3Km5B+UmDrYPeiwhWY43YYEIlzjbRfErKMQ67PtMZWQ=
-github.com/netobserv/flowlogs-pipeline v1.10.0-community/go.mod h1:CuhvsFU2p15A9RPWSOGZaaCfjsocn7ZYtxKH2hAehZY=
-github.com/netobserv/netobserv-ebpf-agent v1.10.0-community h1:XLauF3k+G2RV2EgHvYK+r3zq0FRm+k7P3hq0I0J6GzE=
-github.com/netobserv/netobserv-ebpf-agent v1.10.0-community/go.mod h1:n75b6DyE/soLE794JlxILqt8rd/KoPfSoUs1WN4jzqg=
+github.com/netobserv/flowlogs-pipeline v1.10.0-community.0.20251205170812-75a990e42a64 h1:o689WJgpAzrYknulS+B4JCYUufZccSR8E2tqGkBmOi4=
+github.com/netobserv/flowlogs-pipeline v1.10.0-community.0.20251205170812-75a990e42a64/go.mod h1:JWykocBDD11PwipFq6YGs37SmLBYu4u1F1CgupEEt1I=
+github.com/netobserv/netobserv-ebpf-agent v1.10.0-community.0.20251125162210-4be10c36721e h1:hGJIbcfTbzjpuZ9K7hVwD/6+KijR0TfqJjmXeigQELc=
+github.com/netobserv/netobserv-ebpf-agent v1.10.0-community.0.20251125162210-4be10c36721e/go.mod h1:ZQf6WKnhdfdaR5PQ/Fc99l4/doL94OQeTkQknwFiQZo=
github.com/netsampler/goflow2 v1.3.7 h1:XZaTy8kkMnGXpJ9hS3KbO1McyrFTpVNhVFEx9rNhMmc=
github.com/netsampler/goflow2 v1.3.7/go.mod h1:4UZsVGVAs//iMCptUHn3WNScztJeUhZH7kDW2+/vDdQ=
github.com/onsi/ginkgo/v2 v2.27.2 h1:LzwLj0b89qtIy6SSASkzlNvX6WktqurSHwkk2ipF/Ns=
@@ -150,8 +150,8 @@ github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+L
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
github.com/prometheus/procfs v0.17.0 h1:FuLQ+05u4ZI+SS/w9+BWEM2TXiHKsUQ9TADiRH7DuK0=
github.com/prometheus/procfs v0.17.0/go.mod h1:oPQLaDAMRbA+u8H5Pbfq+dl3VDAvHxMUOVhe0wYB2zw=
-github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
-github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
+github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
+github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik=
@@ -198,24 +198,24 @@ github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
-go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
+go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
+go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY=
-go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ=
-go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I=
+go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
+go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI=
-go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE=
-go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E=
-go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI=
-go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg=
-go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc=
-go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps=
-go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4=
-go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0=
+go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
+go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
+go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
+go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
+go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
+go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
+go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os=
go.opentelemetry.io/proto/otlp v1.7.0/go.mod h1:fSKjH6YJ7HDlwzltzyMj036AJ3ejJLCgCSHGj4efDDo=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -235,41 +235,41 @@ golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/y
golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U=
-golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI=
+golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA=
+golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4=
-golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210=
+golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
+golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/oauth2 v0.32.0 h1:jsCblLleRMDrxMN29H3z/k1KliIvpLgCkE6R8FXXNgY=
golang.org/x/oauth2 v0.32.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug=
-golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
+golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
+golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ=
-golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
-golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q=
-golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss=
+golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
+golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
+golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU=
+golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k=
-golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM=
+golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM=
+golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE=
-golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w=
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ=
+golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -278,12 +278,12 @@ gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0
gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY=
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
-google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b h1:ULiyYQ0FdsJhwwZUwbaXpZF5yUE3h+RA+gxvBu37ucc=
-google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:oDOGiMSXHL4sDTJvFvIB9nRQCGdLP1o/iVaqQK8zB+M=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b h1:zPKJod4w6F1+nRGDI9ubnXYhU9NSWoFAijkHkUXeTK8=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
-google.golang.org/grpc v1.76.0 h1:UnVkv1+uMLYXoIz6o7chp59WfQUYA2ex/BXQ9rHZu7A=
-google.golang.org/grpc v1.76.0/go.mod h1:Ju12QI8M6iQJtbcsV+awF5a4hfJMLi4X0JLo94ULZ6c=
+google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8 h1:mepRgnBZa07I4TRuomDE4sTIYieg/osKmzIf4USdWS4=
+google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8/go.mod h1:fDMmzKV90WSg1NbozdqrE64fkuTv6mlq2zxo9ad+3yo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8 h1:M1rk8KBnUsBDg1oPGHNCxG4vc1f49epmTO7xscSajMk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
+google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
+google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/helm/crds/flows.netobserv.io_flowcollectors.yaml b/helm/crds/flows.netobserv.io_flowcollectors.yaml
index 153219ed7..100e8fc22 100644
--- a/helm/crds/flows.netobserv.io_flowcollectors.yaml
+++ b/helm/crds/flows.netobserv.io_flowcollectors.yaml
@@ -3015,17 +3015,17 @@ spec:
type: boolean
type: object
deploymentModel:
- default: Direct
+ default: Service
description: |-
`deploymentModel` defines the desired type of deployment for flow processing. Possible values are:
- - `Direct` (default) to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
- - `Service` to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
+ - `Service` (default) to make the flow processor listen as a Kubernetes Service, backed by a scalable Deployment.
- `Kafka` to make flows sent to a Kafka pipeline before consumption by the processor.
+ - `Direct` to make the flow processor listen directly from the agents using the host network, backed by a DaemonSet. Only recommended on small clusters, below 15 nodes.
Kafka can provide better scalability, resiliency, and high availability (for more details, see https://www.redhat.com/en/topics/integration/what-is-apache-kafka).
`Direct` is not recommended on large clusters as it is less memory efficient.
enum:
- - Direct
- Service
+ - Direct
- Kafka
type: string
exporters:
diff --git a/internal/controller/ebpf/agent_controller.go b/internal/controller/ebpf/agent_controller.go
index e4028a47c..cd639c423 100644
--- a/internal/controller/ebpf/agent_controller.go
+++ b/internal/controller/ebpf/agent_controller.go
@@ -37,6 +37,7 @@ const (
envAgentIP = "AGENT_IP"
envFlowsTargetHost = "TARGET_HOST"
envFlowsTargetPort = "TARGET_PORT"
+ envTargetTLSCACertPath = "TARGET_TLS_CA_CERT_PATH"
envSampling = "SAMPLING"
envExport = "EXPORT"
envKafkaBrokers = "KAFKA_BROKERS"
@@ -481,14 +482,30 @@ func (c *AgentController) envConfig(ctx context.Context, coll *flowslatest.FlowC
Value: strconv.Itoa(int(*advancedConfig.Port)),
})
} else {
- // Send to FLP service
- config = append(config, corev1.EnvVar{
- Name: envFlowsTargetHost,
- Value: fmt.Sprintf("%s.%s.svc", constants.FLPName, c.Namespace),
- }, corev1.EnvVar{
- Name: envFlowsTargetPort,
- Value: strconv.Itoa(int(*advancedConfig.Port)),
- })
+ skipTLS := flowslatest.IsEnvEnabled(advancedConfig.Env, "SERVER_NOTLS")
+ if !skipTLS {
+ // Send to FLP service using TLS
+ tlsCfg := flowslatest.ClientTLS{
+ Enable: true,
+ CACert: flowslatest.CertificateReference{
+ Type: flowslatest.RefTypeConfigMap,
+ Name: "openshift-service-ca.crt",
+ CertFile: "service-ca.crt",
+ },
+ }
+ caPath := c.volumes.AddCACertificate(&tlsCfg, "svc-certs")
+ config = append(config, corev1.EnvVar{Name: envTargetTLSCACertPath, Value: caPath})
+ }
+ config = append(config,
+ corev1.EnvVar{
+ Name: envFlowsTargetHost,
+ Value: fmt.Sprintf("%s.%s.svc", constants.FLPName, c.Namespace),
+ },
+ corev1.EnvVar{
+ Name: envFlowsTargetPort,
+ Value: strconv.Itoa(int(*advancedConfig.Port)),
+ },
+ )
}
}
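On the agent side, an environment variable such as `TARGET_TLS_CA_CERT_PATH` is typically consumed by loading the referenced CA into the client TLS configuration. A generic sketch using only the standard library; this is an illustration, not the eBPF agent's actual exporter code:

    package example

    import (
        "crypto/tls"
        "crypto/x509"
        "fmt"
        "os"
    )

    // clientTLSFromEnv returns a TLS config trusting the CA file mounted by the
    // operator, or nil when no CA path is set (plain-text connection).
    func clientTLSFromEnv() (*tls.Config, error) {
        caPath := os.Getenv("TARGET_TLS_CA_CERT_PATH")
        if caPath == "" {
            return nil, nil
        }
        pem, err := os.ReadFile(caPath)
        if err != nil {
            return nil, err
        }
        pool := x509.NewCertPool()
        if !pool.AppendCertsFromPEM(pem) {
            return nil, fmt.Errorf("no certificate found in %s", caPath)
        }
        return &tls.Config{RootCAs: pool}, nil
    }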
diff --git a/internal/controller/ebpf/agent_controller_test.go b/internal/controller/ebpf/agent_controller_test.go
index 6a136308c..5a531ddaf 100644
--- a/internal/controller/ebpf/agent_controller_test.go
+++ b/internal/controller/ebpf/agent_controller_test.go
@@ -214,9 +214,9 @@ func TestBpfmanConfig(t *testing.T) {
assert.NotNil(t, ds)
assert.Equal(t, corev1.EnvVar{Name: "EBPF_PROGRAM_MANAGER_MODE", Value: "true"}, ds.Spec.Template.Spec.Containers[0].Env[0])
- assert.Equal(t, "bpfman-maps", ds.Spec.Template.Spec.Volumes[0].Name)
+ assert.Equal(t, "bpfman-maps", ds.Spec.Template.Spec.Volumes[1].Name)
assert.Equal(t, map[string]string{
"csi.bpfman.io/maps": "direct_flows,aggregated_flows,additional_flow_metrics,packet_record,dns_flows,global_counters,filter_map,peer_filter_map,ipsec_ingress_map,ipsec_egress_map",
"csi.bpfman.io/program": "netobserv",
- }, ds.Spec.Template.Spec.Volumes[0].CSI.VolumeAttributes)
+ }, ds.Spec.Template.Spec.Volumes[1].CSI.VolumeAttributes)
}
diff --git a/internal/controller/flowcollector_controller_minimal_test.go b/internal/controller/flowcollector_controller_minimal_test.go
index eedc6aed0..046f91e6b 100644
--- a/internal/controller/flowcollector_controller_minimal_test.go
+++ b/internal/controller/flowcollector_controller_minimal_test.go
@@ -57,10 +57,10 @@ func flowCollectorMinimalSpecs() {
return k8sClient.Get(ctx, agentKey, &ds)
}, timeout, interval).Should(Succeed())
- By("Expecting to create the flowlogs-pipeline DaemonSet")
+ By("Expecting to create the flowlogs-pipeline Deployment")
Eventually(func() error {
- ds := appsv1.DaemonSet{}
- return k8sClient.Get(ctx, flpKey, &ds)
+ depl := appsv1.Deployment{}
+ return k8sClient.Get(ctx, flpKey, &depl)
}, timeout, interval).Should(Succeed())
By("Expecting to create the console plugin Deployment")
diff --git a/internal/controller/flp/flp_common_objects.go b/internal/controller/flp/flp_common_objects.go
index 417ff18a5..98d076abc 100644
--- a/internal/controller/flp/flp_common_objects.go
+++ b/internal/controller/flp/flp_common_objects.go
@@ -33,10 +33,23 @@ const (
startupPeriodSeconds = 10
)
-func newGRPCPipeline(desired *flowslatest.FlowCollectorSpec) config.PipelineBuilderStage {
- return config.NewGRPCPipeline("grpc", api.IngestGRPCProto{
- Port: int(*helper.GetAdvancedProcessorConfig(desired).Port),
- })
+func newGRPCPipeline(desired *flowslatest.FlowCollectorSpec, volumes *volumes.Builder) config.PipelineBuilderStage {
+ adv := helper.GetAdvancedProcessorConfig(desired)
+ cfg := api.IngestGRPCProto{Port: int(*adv.Port)}
+ skipTLS := flowslatest.IsEnvEnabled(adv.Env, "SERVER_NOTLS")
+ if desired.DeploymentModel == flowslatest.DeploymentModelService && !skipTLS {
+ // Communication from agents uses TLS: set up server certificate
+ ref := flowslatest.CertificateReference{
+ Type: flowslatest.RefTypeSecret,
+ Name: monoCertSecretName,
+ CertFile: "tls.crt",
+ CertKey: "tls.key",
+ }
+ cert, key := volumes.AddCertificate(&ref, "svc-certs")
+ cfg.CertPath = cert
+ cfg.KeyPath = key
+ }
+ return config.NewGRPCPipeline("grpc", cfg)
}
func newKafkaPipeline(desired *flowslatest.FlowCollectorSpec, volumes *volumes.Builder) config.PipelineBuilderStage {
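The `monoCertSecretName` secret referenced here is presumably the one generated by OpenShift's service-ca operator through the serving-cert annotation added to the FLP Service later in this diff. On the flowlogs-pipeline side, `certPath`/`keyPath` would then back a TLS-enabled gRPC listener; a generic sketch using the standard gRPC Go API, not FLP's actual ingest code:

    package example

    import (
        "fmt"

        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials"
    )

    // serverOptions illustrates how a gRPC ingester could honor the new
    // certPath/keyPath fields: with both set it serves TLS, otherwise plain text.
    func serverOptions(certPath, keyPath string) ([]grpc.ServerOption, error) {
        if certPath == "" || keyPath == "" {
            return nil, nil
        }
        creds, err := credentials.NewServerTLSFromFile(certPath, keyPath)
        if err != nil {
            return nil, fmt.Errorf("loading TLS server credentials: %w", err)
        }
        return []grpc.ServerOption{grpc.Creds(creds)}, nil
    }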
diff --git a/internal/controller/flp/flp_monolith_objects.go b/internal/controller/flp/flp_monolith_objects.go
index 9879e863e..9434113a7 100644
--- a/internal/controller/flp/flp_monolith_objects.go
+++ b/internal/controller/flp/flp_monolith_objects.go
@@ -21,6 +21,7 @@ const (
monoDynConfigMap = monoName + "-config-dynamic"
monoServiceMonitor = monoName + "-monitor"
monoPromRule = monoName + "-alert"
+ monoCertSecretName = monoName + "-cert"
)
type monolithBuilder struct {
@@ -39,6 +40,7 @@ func newMonolithBuilder(info *reconcilers.Instance, desired *flowslatest.FlowCol
if err != nil {
return monolithBuilder{}, err
}
+
return monolithBuilder{
info: info,
desired: desired,
@@ -116,7 +118,7 @@ func (b *monolithBuilder) deployment(annotations map[string]string) *appsv1.Depl
}
func (b *monolithBuilder) configMaps() (*corev1.ConfigMap, string, *corev1.ConfigMap, error) {
- kafkaStage := newGRPCPipeline(b.desired)
+ grpcStage := newGRPCPipeline(b.desired, &b.volumes)
pipeline := newPipelineBuilder(
b.desired,
b.flowMetrics,
@@ -124,7 +126,7 @@ func (b *monolithBuilder) configMaps() (*corev1.ConfigMap, string, *corev1.Confi
b.info.Loki,
b.info.ClusterInfo.GetID(),
&b.volumes,
- &kafkaStage,
+ &grpcStage,
)
err := pipeline.AddProcessorStages()
if err != nil {
@@ -161,6 +163,9 @@ func (b *monolithBuilder) service() *corev1.Service {
ObjectMeta: metav1.ObjectMeta{
Name: monoName,
Namespace: b.info.Namespace,
+ Annotations: map[string]string{
+ constants.OpenShiftCertificateAnnotation: monoCertSecretName,
+ },
Labels: map[string]string{
"part-of": constants.OperatorName,
"app": monoName,
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/ingest_grpc.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/ingest_grpc.go
index 5da4128cc..fdad54651 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/ingest_grpc.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/ingest_grpc.go
@@ -1,6 +1,9 @@
package api
type IngestGRPCProto struct {
- Port int `yaml:"port,omitempty" json:"port,omitempty" doc:"the port number to listen on"`
- BufferLen int `yaml:"bufferLength,omitempty" json:"bufferLength,omitempty" doc:"the length of the ingest channel buffer, in groups of flows, containing each group hundreds of flows (default: 100)"`
+ Port int `yaml:"port,omitempty" json:"port,omitempty" doc:"the port number to listen on"`
+ BufferLen int `yaml:"bufferLength,omitempty" json:"bufferLength,omitempty" doc:"the length of the ingest channel buffer, in groups of flows, containing each group hundreds of flows (default: 100)"`
+ CertPath string `yaml:"certPath,omitempty" json:"certPath,omitempty" doc:"path of the TLS certificate, if any"`
+ KeyPath string `yaml:"keyPath,omitempty" json:"keyPath,omitempty" doc:"path of the TLS certificate key, if any"`
+ ClientCAPath string `yaml:"clientCAPath,omitempty" json:"clientCAPath,omitempty" doc:"path of the client TLS CA, if any, for mutual TLS"`
}
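For reference, a stage built with the extended ingest API could look like this; a sketch with illustrative port and mount paths:

    package example

    import "github.com/netobserv/flowlogs-pipeline/pkg/api"

    // tlsIngest enables server-side TLS on the gRPC ingest; ClientCAPath would
    // additionally enforce mutual TLS.
    func tlsIngest() api.IngestGRPCProto {
        return api.IngestGRPCProto{
            Port:     2055,
            CertPath: "/var/serving-cert/tls.crt",
            KeyPath:  "/var/serving-cert/tls.key",
            // ClientCAPath: "/var/client-ca/ca.crt",
        }
    }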
diff --git a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
index 68a2e99c0..b2e845528 100644
--- a/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
+++ b/vendor/github.com/netobserv/flowlogs-pipeline/pkg/api/transform_network.go
@@ -98,15 +98,26 @@ type K8sReference struct {
}
type K8sRule struct {
- IPField string `yaml:"ipField,omitempty" json:"ipField,omitempty" doc:"entry IP input field"`
- InterfacesField string `yaml:"interfacesField,omitempty" json:"interfacesField,omitempty" doc:"entry Interfaces input field"`
- UDNsField string `yaml:"udnsField,omitempty" json:"udnsField,omitempty" doc:"entry UDNs input field"`
- MACField string `yaml:"macField,omitempty" json:"macField,omitempty" doc:"entry MAC input field"`
- Output string `yaml:"output,omitempty" json:"output,omitempty" doc:"entry output field"`
- Assignee string `yaml:"assignee,omitempty" json:"assignee,omitempty" doc:"value needs to assign to output field"`
- LabelsPrefix string `yaml:"labels_prefix,omitempty" json:"labels_prefix,omitempty" doc:"labels prefix to use to copy input lables, if empty labels will not be copied"`
- AddZone bool `yaml:"add_zone,omitempty" json:"add_zone,omitempty" doc:"if true the rule will add the zone"`
- OutputKeys K8SOutputKeys `yaml:"-" json:"-"`
+ IPField string `yaml:"ipField,omitempty" json:"ipField,omitempty" doc:"entry IP input field"`
+ InterfacesField string `yaml:"interfacesField,omitempty" json:"interfacesField,omitempty" doc:"entry Interfaces input field"`
+ UDNsField string `yaml:"udnsField,omitempty" json:"udnsField,omitempty" doc:"entry UDNs input field"`
+ MACField string `yaml:"macField,omitempty" json:"macField,omitempty" doc:"entry MAC input field"`
+ Output string `yaml:"output,omitempty" json:"output,omitempty" doc:"entry output field"`
+ Assignee string `yaml:"assignee,omitempty" json:"assignee,omitempty" doc:"value needs to assign to output field"`
+ LabelsPrefix string `yaml:"labels_prefix,omitempty" json:"labels_prefix,omitempty" doc:"labels prefix to use to copy input labels, if empty labels will not be copied"`
+ LabelInclusions []string `yaml:"label_inclusions,omitempty" json:"label_inclusions,omitempty" doc:"labels to include, if empty all labels will be included. Only used if labels_prefix is specified"`
+ LabelExclusions []string `yaml:"label_exclusions,omitempty" json:"label_exclusions,omitempty" doc:"labels to exclude, if empty no labels will be excluded. Only used if labels_prefix is specified"`
+ LabelValueMaxLength *int `yaml:"label_value_max_length,omitempty" json:"label_value_max_length,omitempty" doc:"label value max length, if specified, will trim label values to this length"`
+ AnnotationsPrefix string `yaml:"annotations_prefix,omitempty" json:"annotations_prefix,omitempty" doc:"annotations prefix to use to copy input annotations, if empty annotations will not be copied"`
+ AnnotationInclusions []string `yaml:"annotation_inclusions,omitempty" json:"annotation_inclusions,omitempty" doc:"annotations to include, if empty all annotations will be included. Only used if annotations_prefix is specified"`
+ AnnotationExclusions []string `yaml:"annotation_exclusions,omitempty" json:"annotation_exclusions,omitempty" doc:"annotations to exclude, if empty no annotations will be excluded. Only used if annotations_prefix is specified"`
+ AnnotationValueMaxLength *int `yaml:"annotation_value_max_length,omitempty" json:"annotation_value_max_length,omitempty" doc:"annotation value max length, if specified, will trim annotation values to this length"`
+ AddZone bool `yaml:"add_zone,omitempty" json:"add_zone,omitempty" doc:"if true the rule will add the zone"`
+ OutputKeys K8SOutputKeys `yaml:"-" json:"-"`
+ LabelInclusionsMap map[string]struct{} `yaml:"-" json:"-"`
+ LabelExclusionsMap map[string]struct{} `yaml:"-" json:"-"`
+ AnnotationInclusionsMap map[string]struct{} `yaml:"-" json:"-"`
+ AnnotationExclusionsMap map[string]struct{} `yaml:"-" json:"-"`
}
type K8SOutputKeys struct {
@@ -150,6 +161,11 @@ func (r *K8sRule) preprocess() {
Zone: r.Output + "_Zone",
}
}
+
+ r.LabelInclusionsMap = buildStringMap(r.LabelInclusions)
+ r.LabelExclusionsMap = buildStringMap(r.LabelExclusions)
+ r.AnnotationInclusionsMap = buildStringMap(r.AnnotationInclusions)
+ r.AnnotationExclusionsMap = buildStringMap(r.AnnotationExclusions)
}
type SecondaryNetwork struct {
@@ -199,3 +215,11 @@ type NetworkTransformSubnetLabel struct {
CIDRs []string `yaml:"cidrs,omitempty" json:"cidrs,omitempty" doc:"list of CIDRs to match a label"`
Name string `yaml:"name,omitempty" json:"name,omitempty" doc:"name of the label"`
}
+
+func buildStringMap(items []string) map[string]struct{} {
+ m := make(map[string]struct{}, len(items))
+ for _, item := range items {
+ m[item] = struct{}{}
+ }
+ return m
+}
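The inclusion and exclusion lists are precomputed into sets by `preprocess`, so lookups during enrichment are O(1). A hypothetical filter showing the intended semantics, not FLP's actual enrichment code:

    package example

    // shouldCopy returns whether a label or annotation key passes the configured
    // filters: an empty inclusion set lets everything through, then explicit
    // exclusions are removed.
    func shouldCopy(key string, inclusions, exclusions map[string]struct{}) bool {
        if len(inclusions) > 0 {
            if _, ok := inclusions[key]; !ok {
                return false
            }
        }
        _, excluded := exclusions[key]
        return !excluded
    }

    // truncate applies the optional *_value_max_length setting.
    func truncate(value string, maxLen *int) string {
        if maxLen != nil && *maxLen > 0 && len(value) > *maxLen {
            return value[:*maxLen]
        }
        return value
    }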
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
index e854d7e84..2950fdb42 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/id.go
@@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) {
}
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
-func unmarshalJSON(dst []byte, src []byte) error {
+func unmarshalJSON(dst, src []byte) error {
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
src = src[1 : l-1]
}
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
index 29e629d66..5bb3b16c7 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/number.go
@@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error {
// strings or integers.
type protoUint64 uint64
-// Int64 returns the protoUint64 as a uint64.
+// Uint64 returns the protoUint64 as a uint64.
func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
// UnmarshalJSON decodes both strings and integers.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
index a13a6b733..67f80b6aa 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/span.go
@@ -10,6 +10,7 @@ import (
"errors"
"fmt"
"io"
+ "math"
"time"
)
@@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) {
}{
Alias: Alias(s),
ParentSpanID: parentSpanId,
- StartTime: uint64(startT),
- EndTime: uint64(endT),
+ StartTime: uint64(startT), // nolint:gosec // >0 checked above.
+ EndTime: uint64(endT), // nolint:gosec // >0 checked above.
})
}
@@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error {
case "startTimeUnixNano", "start_time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- s.StartTime = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ s.StartTime = time.Unix(0, v)
case "endTimeUnixNano", "end_time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- s.EndTime = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ s.EndTime = time.Unix(0, v)
case "attributes":
err = decoder.Decode(&s.Attrs)
case "droppedAttributesCount", "dropped_attributes_count":
@@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error {
type SpanFlags int32
const (
+ // SpanFlagsTraceFlagsMask is a mask for trace-flags.
+ //
// Bits 0-7 are used for trace flags.
SpanFlagsTraceFlagsMask SpanFlags = 255
- // Bits 8 and 9 are used to indicate that the parent span or link span is remote.
- // Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
- // Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote.
+ // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
SpanFlagsContextHasIsRemoteMask SpanFlags = 256
- // SpanFlagsContextHasIsRemoteMask indicates the Span is remote.
+ // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status.
+ //
+ // Bits 8 and 9 are used to indicate that the parent span or link span is
+ // remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is
+ // remote.
SpanFlagsContextIsRemoteMask SpanFlags = 512
)
@@ -263,26 +273,30 @@ const (
type SpanKind int32
const (
- // Indicates that the span represents an internal operation within an application,
- // as opposed to an operation happening at the boundaries. Default value.
+ // SpanKindInternal indicates that the span represents an internal
+ // operation within an application, as opposed to an operation happening at
+ // the boundaries.
SpanKindInternal SpanKind = 1
- // Indicates that the span covers server-side handling of an RPC or other
- // remote network request.
+ // SpanKindServer indicates that the span covers server-side handling of an
+ // RPC or other remote network request.
SpanKindServer SpanKind = 2
- // Indicates that the span describes a request to some remote service.
+ // SpanKindClient indicates that the span describes a request to some
+ // remote service.
SpanKindClient SpanKind = 3
- // Indicates that the span describes a producer sending a message to a broker.
- // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship
- // between producer and consumer spans. A PRODUCER span ends when the message was accepted
- // by the broker while the logical processing of the message might span a much longer time.
+ // SpanKindProducer indicates that the span describes a producer sending a
+ // message to a broker. Unlike SpanKindClient and SpanKindServer, there is
+ // often no direct critical path latency relationship between producer and
+ // consumer spans. A SpanKindProducer span ends when the message was
+ // accepted by the broker while the logical processing of the message might
+ // span a much longer time.
SpanKindProducer SpanKind = 4
- // Indicates that the span describes consumer receiving a message from a broker.
- // Like the PRODUCER kind, there is often no direct critical path latency relationship
- // between producer and consumer spans.
+ // SpanKindConsumer indicates that the span describes a consumer receiving
+ // a message from a broker. Like SpanKindProducer, there is often no direct
+ // critical path latency relationship between producer and consumer spans.
SpanKindConsumer SpanKind = 5
)
-// Event is a time-stamped annotation of the span, consisting of user-supplied
+// SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
// text description and key-value pairs.
type SpanEvent struct {
// time_unix_nano is the time the event occurred.
@@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) {
Time uint64 `json:"timeUnixNano,omitempty"`
}{
Alias: Alias(e),
- Time: uint64(t),
+ Time: uint64(t), //nolint:gosec // >0 checked above
})
}
@@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
case "timeUnixNano", "time_unix_nano":
var val protoUint64
err = decoder.Decode(&val)
- se.Time = time.Unix(0, int64(val.Uint64()))
+ v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
+ se.Time = time.Unix(0, v)
case "name":
err = decoder.Decode(&se.Name)
case "attributes":
@@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
return nil
}
-// A pointer from the current span to another span in the same trace or in a
-// different trace. For example, this can be used in batching operations,
-// where a single batch handler processes multiple requests from different
-// traces or when the handler receives a request from a different project.
+// SpanLink is a reference from the current span to another span in the same
+// trace or in a different trace. For example, this can be used in batching
+// operations, where a single batch handler processes multiple requests from
+// different traces or when the handler receives a request from a different
+// project.
type SpanLink struct {
// A unique identifier of a trace that this linked span is part of. The ID is a
// 16-byte array.
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
index 1217776ea..a2802764f 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/status.go
@@ -3,17 +3,19 @@
package telemetry
+// StatusCode is the status of a Span.
+//
// For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type StatusCode int32
const (
- // The default status.
+ // StatusCodeUnset is the default status.
StatusCodeUnset StatusCode = 0
- // The Span has been validated by an Application developer or Operator to
- // have completed successfully.
+ // StatusCodeOK is used when the Span has been validated by an Application
+ // developer or Operator to have completed successfully.
StatusCodeOK StatusCode = 1
- // The Span contains an error.
+ // StatusCodeError is used when the Span contains an error.
StatusCodeError StatusCode = 2
)
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
index 69a348f0f..44197b808 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/traces.go
@@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error {
return nil
}
-// A collection of ScopeSpans from a Resource.
+// ResourceSpans is a collection of ScopeSpans from a Resource.
type ResourceSpans struct {
// The resource for the spans in this message.
// If this field is not set then no resource info is known.
@@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
return nil
}
-// A collection of Spans produced by an InstrumentationScope.
+// ScopeSpans is a collection of Spans produced by an InstrumentationScope.
type ScopeSpans struct {
// The instrumentation scope information for the spans in this message.
// Semantically when InstrumentationScope isn't set, it is equivalent with
diff --git a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
index 0dd01b063..022768bb5 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/internal/telemetry/value.go
@@ -1,8 +1,6 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-//go:generate stringer -type=ValueKind -trimprefix=ValueKind
-
package telemetry
import (
@@ -23,7 +21,7 @@ import (
// A zero value is valid and represents an empty value.
type Value struct {
// Ensure forward compatibility by explicitly making this not comparable.
- noCmp [0]func() //nolint: unused // This is indeed used.
+ noCmp [0]func() //nolint:unused // This is indeed used.
// num holds the value for Int64, Float64, and Bool. It holds the length
// for String, Bytes, Slice, Map.
@@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) }
// Int64Value returns a [Value] for an int64.
func Int64Value(v int64) Value {
- return Value{num: uint64(v), any: ValueKindInt64}
+ return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv.
}
// Float64Value returns a [Value] for a float64.
@@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 {
// this will return garbage.
func (v Value) asInt64() int64 {
// Assumes v.num was a valid int64 (overflow not checked).
- return int64(v.num) // nolint: gosec
+ return int64(v.num) //nolint:gosec // Bounded.
}
// AsBool returns the value held by v as a bool.
@@ -309,13 +307,13 @@ func (v Value) String() string {
return v.asString()
case ValueKindInt64:
// Assumes v.num was a valid int64 (overflow not checked).
- return strconv.FormatInt(int64(v.num), 10) // nolint: gosec
+ return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded.
case ValueKindFloat64:
return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
case ValueKindBool:
return strconv.FormatBool(v.asBool())
case ValueKindBytes:
- return fmt.Sprint(v.asBytes())
+ return string(v.asBytes())
case ValueKindMap:
return fmt.Sprint(v.asMap())
case ValueKindSlice:
@@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) {
case ValueKindInt64:
return json.Marshal(struct {
Value string `json:"intValue"`
- }{strconv.FormatInt(int64(v.num), 10)})
+ }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv.
case ValueKindFloat64:
return json.Marshal(struct {
Value float64 `json:"doubleValue"`
diff --git a/vendor/go.opentelemetry.io/auto/sdk/span.go b/vendor/go.opentelemetry.io/auto/sdk/span.go
index 6ebea12a9..815d271ff 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/span.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/span.go
@@ -6,6 +6,7 @@ package sdk
import (
"encoding/json"
"fmt"
+ "math"
"reflect"
"runtime"
"strings"
@@ -16,7 +17,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
@@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
limit := maxSpan.Attrs
if limit == 0 {
// No attributes allowed.
- s.span.DroppedAttrs += uint32(len(attrs))
+ n := int64(len(attrs))
+ if n > 0 {
+ s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked.
+ min(n, math.MaxUint32),
+ )
+ }
return
}
@@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
// number of dropped attributes is also returned.
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
+ n := len(attrs)
if limit == 0 {
- return nil, uint32(len(attrs))
+ var out uint32
+ if n > 0 {
+ out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked.
+ }
+ return nil, out
}
if limit < 0 {
@@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u
return convAttrs(attrs), 0
}
- limit = min(len(attrs), limit)
- return convAttrs(attrs[:limit]), uint32(len(attrs) - limit)
+ if n < 0 {
+ n = 0
+ }
+
+ limit = min(n, limit)
+ return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked.
}
func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {
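The recurring pattern in these vendored changes is to clamp a count into the uint32 range before converting, so the gosec overflow warning can be safely silenced. In isolation (illustrative helper, not part of the SDK):

    package example

    import "math"

    // boundedUint32 clamps a possibly negative or oversized count before the
    // conversion, mirroring the //nolint:gosec pattern used above.
    func boundedUint32(n int) uint32 {
        if n < 0 {
            n = 0
        }
        return uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked.
    }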
diff --git a/vendor/go.opentelemetry.io/auto/sdk/tracer.go b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
index cbcfabde3..e09acf022 100644
--- a/vendor/go.opentelemetry.io/auto/sdk/tracer.go
+++ b/vendor/go.opentelemetry.io/auto/sdk/tracer.go
@@ -5,6 +5,7 @@ package sdk
import (
"context"
+ "math"
"time"
"go.opentelemetry.io/otel/trace"
@@ -21,15 +22,20 @@ type tracer struct {
var _ trace.Tracer = tracer{}
-func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) {
- var psc trace.SpanContext
+func (t tracer) Start(
+ ctx context.Context,
+ name string,
+ opts ...trace.SpanStartOption,
+) (context.Context, trace.Span) {
+ var psc, sc trace.SpanContext
sampled := true
span := new(span)
// Ask eBPF for sampling decision and span context info.
- t.start(ctx, span, &psc, &sampled, &span.spanContext)
+ t.start(ctx, span, &psc, &sampled, &sc)
span.sampled.Store(sampled)
+ span.spanContext = sc
ctx = trace.ContextWithSpan(ctx, span)
@@ -58,7 +64,13 @@ func (t *tracer) start(
// start is used for testing.
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
-func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) {
+var intToUint32Bound = min(math.MaxInt, math.MaxUint32)
+
+func (t tracer) traces(
+ name string,
+ cfg trace.SpanConfig,
+ sc, psc trace.SpanContext,
+) (*telemetry.Traces, *telemetry.Span) {
span := &telemetry.Span{
TraceID: telemetry.TraceID(sc.TraceID()),
SpanID: telemetry.SpanID(sc.SpanID()),
@@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont
links := cfg.Links()
if limit := maxSpan.Links; limit == 0 {
- span.DroppedLinks = uint32(len(links))
+ n := len(links)
+ if n > 0 {
+ bounded := max(min(n, intToUint32Bound), 0)
+ span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
+ }
} else {
if limit > 0 {
n := max(len(links)-limit, 0)
- span.DroppedLinks = uint32(n)
+ bounded := min(n, intToUint32Bound)
+ span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
links = links[n:]
}
span.Links = convLinks(links)
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
index 6bf3abc41..2b53a25e1 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -7,3 +7,4 @@ ans
nam
valu
thirdparty
+addOpt
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index 5f69cc027..b01762ffc 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -10,6 +10,7 @@ linters:
- depguard
- errcheck
- errorlint
+ - gocritic
- godot
- gosec
- govet
@@ -86,6 +87,18 @@ linters:
deny:
- pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
desc: Do not use cross-module internal packages.
+ gocritic:
+ disabled-checks:
+ - appendAssign
+ - commentedOutCode
+ - dupArg
+ - hugeParam
+ - importShadow
+ - preferDecodeRune
+ - rangeValCopy
+ - unnamedResult
+ - whyNoLint
+ enable-all: true
godot:
exclude:
# Exclude links.
@@ -167,7 +180,10 @@ linters:
- fmt.Print
- fmt.Printf
- fmt.Println
+ - name: unused-parameter
+ - name: unused-receiver
- name: unnecessary-stmt
+ - name: use-any
- name: useless-break
- name: var-declaration
- name: var-naming
@@ -224,10 +240,6 @@ linters:
- linters:
- gosec
text: 'G402: TLS MinVersion too low.'
- paths:
- - third_party$
- - builtin$
- - examples$
issues:
max-issues-per-linter: 0
max-same-issues: 0
@@ -237,14 +249,12 @@ formatters:
- goimports
- golines
settings:
+ gofumpt:
+ extra-rules: true
goimports:
local-prefixes:
- - go.opentelemetry.io
+ - go.opentelemetry.io/otel
golines:
max-len: 120
exclusions:
generated: lax
- paths:
- - third_party$
- - builtin$
- - examples$
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
index 40d62fa2e..532850588 100644
--- a/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -2,5 +2,8 @@ http://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
+# Weaver model URL for semantic-conventions repository.
+https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
+http://4.3.2.1:78/user/123
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index 4acc75701..f3abcfdc2 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -11,6 +11,93 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
+## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
+
+This release is the last to support [Go 1.23].
+The next release will require at least [Go 1.24].
+
+### Added
+
+- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772)
+- Add template attribute functions to the `go.opentelemetry.io/otel/semconv/v1.34.0` package. (#6939)
+ - `ContainerLabel`
+ - `DBOperationParameter`
+ - `DBSystemParameter`
+ - `HTTPRequestHeader`
+ - `HTTPResponseHeader`
+ - `K8SCronJobAnnotation`
+ - `K8SCronJobLabel`
+ - `K8SDaemonSetAnnotation`
+ - `K8SDaemonSetLabel`
+ - `K8SDeploymentAnnotation`
+ - `K8SDeploymentLabel`
+ - `K8SJobAnnotation`
+ - `K8SJobLabel`
+ - `K8SNamespaceAnnotation`
+ - `K8SNamespaceLabel`
+ - `K8SNodeAnnotation`
+ - `K8SNodeLabel`
+ - `K8SPodAnnotation`
+ - `K8SPodLabel`
+ - `K8SReplicaSetAnnotation`
+ - `K8SReplicaSetLabel`
+ - `K8SStatefulSetAnnotation`
+ - `K8SStatefulSetLabel`
+ - `ProcessEnvironmentVariable`
+ - `RPCConnectRPCRequestMetadata`
+ - `RPCConnectRPCResponseMetadata`
+ - `RPCGRPCRequestMetadata`
+ - `RPCGRPCResponseMetadata`
+- Add `ErrorType` attribute helper function to the `go.opentelemetry.io/otel/semconv/v1.34.0` package. (#6962)
+- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968)
+- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179)
+- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001)
+- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`.
+ Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209)
+- The `go.opentelemetry.io/otel/semconv/v1.36.0` package.
+ The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0`. (#7032, #7041)
+- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111)
+- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`.
+ Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121)
+- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`.
+ Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133)
+- Support testing of [Go 1.25]. (#7187)
+- The `go.opentelemetry.io/otel/semconv/v1.37.0` package.
+ The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7254)
+
+### Changed
+
+- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791)
+- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908)
+- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094)
+
+### Fixed
+
+- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002)
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088)
+- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195)
+- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199)
+
+### Deprecated
+
+- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111)
+- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166)
+
+## [0.59.1] 2025-07-21
+
+### Changed
+
+- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046)
+- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled.
+ It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044)
+
+### Fixed
+
+- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets.
+ E.g. `{spans}`. (#7044)
+
## [1.37.0/0.59.0/0.13.0] 2025-06-25
### Added
@@ -3343,7 +3430,8 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.37.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD
+[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
@@ -3439,6 +3527,7 @@ It contains api and sdk for trace and meter.
+[Go 1.25]: https://go.dev/doc/go1.25
[Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22
diff --git a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS
index 945a07d2b..26a03aed1 100644
--- a/vendor/go.opentelemetry.io/otel/CODEOWNERS
+++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS
@@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners
#
-* @MrAlias @XSAM @dashpole @pellared @dmathieu
+* @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125
CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index f9ddc281f..0b3ae855c 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -192,6 +192,35 @@ should have `go test -bench` output in their description.
should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
output in their description.
+## Dependencies
+
+This project uses [Go Modules] for dependency management. All modules will use
+`go.mod` to explicitly list all direct and indirect dependencies, ensuring a
+clear dependency graph. The `go.sum` file for each module will be committed to
+the repository and used to verify the integrity of downloaded modules,
+preventing malicious tampering.
+
+This project uses automated dependency update tools (i.e. dependabot,
+renovatebot) to manage updates to dependencies. This ensures that dependencies
+are kept up-to-date with the latest security patches and features and are
+reviewed before being merged. If you would like to propose a change to a
+dependency it should be done through a pull request that updates the `go.mod`
+file and includes a description of the change.
+
+See the [versioning and compatibility](./VERSIONING.md) policy for more details
+about dependency compatibility.
+
+[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more
+
+### Environment Dependencies
+
+This project does not partition dependencies based on the environment (i.e.
+`development`, `staging`, `production`).
+
+Only the dependencies explicitly included in the released modules have been
+tested and verified to work with the released code. No other guarantee is made
+about the compatibility of other dependencies.
+
## Documentation
Each (non-internal, non-test) package must be documented using
@@ -233,6 +262,10 @@ For a non-comprehensive but foundational overview of these best practices
the [Effective Go](https://golang.org/doc/effective_go.html) documentation
is an excellent starting place.
+We also recommend following the
+[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
+that collects common comments made during reviews of Go code.
+
As a convenience for developers building this project the `make precommit`
will format, lint, validate, and in some cases fix the changes you plan to
submit. This check will need to pass for your changes to be able to be
@@ -586,6 +619,10 @@ See also:
### Testing
+We allow using [`testify`](https://github.com/stretchr/testify) even though
+it is seen as non-idiomatic according to
+the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page.
+
The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the
@@ -640,13 +677,6 @@ should be canceled.
## Approvers and Maintainers
-### Triagers
-
-- [Alex Kats](https://github.com/akats7), Capital One
-- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
-
-### Approvers
-
### Maintainers
- [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
@@ -655,6 +685,21 @@ should be canceled.
- [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
- [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
+For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
+
+### Approvers
+
+- [Flc](https://github.com/flc1125), Independent
+
+For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
+
+### Triagers
+
+- [Alex Kats](https://github.com/akats7), Capital One
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
+
+For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
+
### Emeritus
- [Aaron Clawson](https://github.com/MadVikingGod)
@@ -665,6 +710,8 @@ should be canceled.
- [Josh MacDonald](https://github.com/jmacd)
- [Liz Fong-Jones](https://github.com/lizthegrey)
+For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
+
### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community
diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/otel/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index 4fa423ca0..bc0f1f92d 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -34,9 +34,6 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
MULTIMOD = $(TOOLS)/multimod
$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
-SEMCONVGEN = $(TOOLS)/semconvgen
-$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
-
CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
@@ -71,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools
-tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
+tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker
@@ -284,7 +281,7 @@ semconv-generate: $(SEMCONVKIT)
docker run --rm \
-u $(DOCKER_USER) \
--env HOME=/tmp/weaver \
- --mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \
+ --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \
--mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
--mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
$(WEAVER_IMAGE) registry generate \
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 5fa1b75c6..6b7ab5f21 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -53,18 +53,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture |
|----------|------------|--------------|
+| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 |
+| Ubuntu | 1.25 | 386 |
| Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 |
+| Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.24 | arm64 |
| Ubuntu | 1.23 | arm64 |
+| macOS 13 | 1.25 | amd64 |
| macOS 13 | 1.24 | amd64 |
| macOS 13 | 1.23 | amd64 |
+| macOS | 1.25 | arm64 |
| macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 |
+| Windows | 1.25 | amd64 |
| Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 |
+| Windows | 1.25 | 386 |
| Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |
diff --git a/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml
new file mode 100644
index 000000000..8041fc62e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml
@@ -0,0 +1,203 @@
+header:
+ schema-version: "1.0.0"
+ expiration-date: "2026-08-04T00:00:00.000Z"
+ last-updated: "2025-08-04"
+ last-reviewed: "2025-08-04"
+ commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8
+ project-url: https://github.com/open-telemetry/opentelemetry-go
+ project-release: "v1.37.0"
+ changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md
+ license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE
+
+project-lifecycle:
+ status: active
+ bug-fixes-only: false
+ core-maintainers:
+ - https://github.com/dmathieu
+ - https://github.com/dashpole
+ - https://github.com/pellared
+ - https://github.com/XSAM
+ - https://github.com/MrAlias
+ release-process: |
+ See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md
+
+contribution-policy:
+ accepts-pull-requests: true
+ accepts-automated-pull-requests: true
+ automated-tools-list:
+ - automated-tool: dependabot
+ action: allowed
+ comment: Automated dependency updates are accepted.
+ - automated-tool: renovatebot
+ action: allowed
+ comment: Automated dependency updates are accepted.
+ - automated-tool: opentelemetrybot
+ action: allowed
+ comment: Automated OpenTelemetry actions are accepted.
+ contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md
+
+documentation:
+ - https://pkg.go.dev/go.opentelemetry.io/otel
+ - https://opentelemetry.io/docs/instrumentation/go/
+
+distribution-points:
+ - pkg:golang/go.opentelemetry.io/otel
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test
+ - pkg:golang/go.opentelemetry.io/otel/bridge/opentracing
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/zipkin
+ - pkg:golang/go.opentelemetry.io/otel/metric
+ - pkg:golang/go.opentelemetry.io/otel/sdk
+ - pkg:golang/go.opentelemetry.io/otel/sdk/metric
+ - pkg:golang/go.opentelemetry.io/otel/trace
+ - pkg:golang/go.opentelemetry.io/otel/exporters/prometheus
+ - pkg:golang/go.opentelemetry.io/otel/log
+ - pkg:golang/go.opentelemetry.io/otel/log/logtest
+ - pkg:golang/go.opentelemetry.io/otel/sdk/log
+ - pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
+ - pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
+ - pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog
+ - pkg:golang/go.opentelemetry.io/otel/schema
+
+security-artifacts:
+ threat-model:
+ threat-model-created: false
+ comment: |
+ No formal threat model created yet.
+ self-assessment:
+ self-assessment-created: false
+ comment: |
+ No formal self-assessment yet.
+
+security-testing:
+ - tool-type: sca
+ tool-name: Dependabot
+ tool-version: latest
+ tool-url: https://github.com/dependabot
+ tool-rulesets:
+ - built-in
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ Automated dependency updates.
+ - tool-type: sast
+ tool-name: golangci-lint
+ tool-version: latest
+ tool-url: https://github.com/golangci/golangci-lint
+ tool-rulesets:
+ - built-in
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ Static analysis in CI.
+ - tool-type: fuzzing
+ tool-name: OSS-Fuzz
+ tool-version: latest
+ tool-url: https://github.com/google/oss-fuzz
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: false
+ before-release: false
+ comment: |
+ OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details.
+ - tool-type: sast
+ tool-name: CodeQL
+ tool-version: latest
+ tool-url: https://github.com/github/codeql
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details.
+ - tool-type: sca
+ tool-name: govulncheck
+ tool-version: latest
+ tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck
+ tool-rulesets:
+ - default
+ integration:
+ ad-hoc: false
+ ci: true
+ before-release: true
+ comment: |
+ govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration.
+
+security-assessments:
+ - auditor-name: 7ASecurity
+ auditor-url: https://7asecurity.com
+ auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf
+ report-year: 2023
+ comment: |
+ This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository.
+
+security-contacts:
+ - type: email
+ value: cncf-opentelemetry-security@lists.cncf.io
+ primary: true
+ - type: website
+ value: https://github.com/open-telemetry/opentelemetry-go/security/policy
+ primary: false
+
+vulnerability-reporting:
+ accepts-vulnerability-reports: true
+ email-contact: cncf-opentelemetry-security@lists.cncf.io
+ security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy
+ comment: |
+ Security issues should be reported via email or GitHub security policy page.
+
+dependencies:
+ third-party-packages: true
+ dependencies-lists:
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod
+ - https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod
+ dependencies-lifecycle:
+ policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ comment: |
+ Dependency lifecycle managed via go.mod and renovatebot.
+ env-dependencies-policy:
+ policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
+ comment: |
+ See contributing policy for environment usage.
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index 318e42fca..6333d34b3 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -78,7 +78,7 @@ func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{
- New: func() interface{} {
+ New: func() any {
return &bytes.Buffer{}
},
},
@@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string {
for iter.Next() {
i, keyValue := iter.IndexedAttribute()
if i > 0 {
- _, _ = buf.WriteRune(',')
+ _ = buf.WriteByte(',')
}
copyAndEscape(buf, string(keyValue.Key))
- _, _ = buf.WriteRune('=')
+ _ = buf.WriteByte('=')
if keyValue.Value.Type() == STRING {
copyAndEscape(buf, keyValue.Value.AsString())
@@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) {
for _, ch := range val {
switch ch {
case '=', ',', escapeChar:
- _, _ = buf.WriteRune(escapeChar)
+ _ = buf.WriteByte(escapeChar)
}
_, _ = buf.WriteRune(ch)
}
}
-// Valid returns true if this encoder ID was allocated by
-// `NewEncoderID`. Invalid encoder IDs will not be cached.
+// Valid reports whether this encoder ID was allocated by
+// [NewEncoderID]. Invalid encoder IDs will not be cached.
func (id EncoderID) Valid() bool {
return id.value != 0
}
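
The encoder change swaps `bytes.Buffer.WriteRune` for `WriteByte` when emitting the single-byte `','` and `'='` separators, skipping the UTF-8 encoding path. A small illustrative sketch of the difference; the buffer and values are made up for the example:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer
	_ = buf.WriteByte('=')    // known single-byte ASCII separator: exactly one byte appended
	_, _ = buf.WriteRune('λ') // arbitrary rune: may append several UTF-8 bytes
	fmt.Println(buf.String(), buf.Len()) // "=λ" 3
}
```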
diff --git a/vendor/go.opentelemetry.io/otel/attribute/filter.go b/vendor/go.opentelemetry.io/otel/attribute/filter.go
index 3eeaa5d44..624ebbe38 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/filter.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/filter.go
@@ -15,8 +15,8 @@ type Filter func(KeyValue) bool
//
// If keys is empty a deny-all filter is returned.
func NewAllowKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return false }
+ if len(keys) == 0 {
+ return func(KeyValue) bool { return false }
}
allowed := make(map[Key]struct{}, len(keys))
@@ -34,8 +34,8 @@ func NewAllowKeysFilter(keys ...Key) Filter {
//
// If keys is empty an allow-all filter is returned.
func NewDenyKeysFilter(keys ...Key) Filter {
- if len(keys) <= 0 {
- return func(kv KeyValue) bool { return true }
+ if len(keys) == 0 {
+ return func(KeyValue) bool { return true }
}
forbid := make(map[Key]struct{}, len(keys))
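
For reference, a brief usage sketch of the filters touched above, including the deny-all behavior of `NewAllowKeysFilter` when called with no keys; the attribute key used here is an arbitrary example:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kv := attribute.String("http.method", "GET")

	allowHTTP := attribute.NewAllowKeysFilter("http.method")
	fmt.Println(allowHTTP(kv)) // true: key is in the allow list

	denyAll := attribute.NewAllowKeysFilter() // no keys: deny-all filter
	fmt.Println(denyAll(kv)) // false

	denyHTTP := attribute.NewDenyKeysFilter("http.method")
	fmt.Println(denyHTTP(kv)) // false: key is explicitly denied
}
```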
diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
index b76d2bbfd..087550430 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/internal/attribute.go
@@ -12,7 +12,7 @@ import (
)
// BoolSliceValue converts a bool slice into an array with same elements as slice.
-func BoolSliceValue(v []bool) interface{} {
+func BoolSliceValue(v []bool) any {
var zero bool
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -20,7 +20,7 @@ func BoolSliceValue(v []bool) interface{} {
}
// Int64SliceValue converts an int64 slice into an array with same elements as slice.
-func Int64SliceValue(v []int64) interface{} {
+func Int64SliceValue(v []int64) any {
var zero int64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -28,7 +28,7 @@ func Int64SliceValue(v []int64) interface{} {
}
// Float64SliceValue converts a float64 slice into an array with same elements as slice.
-func Float64SliceValue(v []float64) interface{} {
+func Float64SliceValue(v []float64) any {
var zero float64
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -36,7 +36,7 @@ func Float64SliceValue(v []float64) interface{} {
}
// StringSliceValue converts a string slice into an array with same elements as slice.
-func StringSliceValue(v []string) interface{} {
+func StringSliceValue(v []string) any {
var zero string
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v))
@@ -44,7 +44,7 @@ func StringSliceValue(v []string) interface{} {
}
// AsBoolSlice converts a bool array into a slice into with same elements as array.
-func AsBoolSlice(v interface{}) []bool {
+func AsBoolSlice(v any) []bool {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -57,7 +57,7 @@ func AsBoolSlice(v interface{}) []bool {
}
// AsInt64Slice converts an int64 array into a slice into with same elements as array.
-func AsInt64Slice(v interface{}) []int64 {
+func AsInt64Slice(v any) []int64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -70,7 +70,7 @@ func AsInt64Slice(v interface{}) []int64 {
}
// AsFloat64Slice converts a float64 array into a slice into with same elements as array.
-func AsFloat64Slice(v interface{}) []float64 {
+func AsFloat64Slice(v any) []float64 {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
@@ -83,7 +83,7 @@ func AsFloat64Slice(v interface{}) []float64 {
}
// AsStringSlice converts a string array into a slice into with same elements as array.
-func AsStringSlice(v interface{}) []string {
+func AsStringSlice(v any) []string {
rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array {
return nil
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
index f2ba89ce4..8df6249f0 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/iterator.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -25,8 +25,8 @@ type oneIterator struct {
attr KeyValue
}
-// Next moves the iterator to the next position. Returns false if there are no
-// more attributes.
+// Next moves the iterator to the next position.
+// Next reports whether there are more attributes.
func (i *Iterator) Next() bool {
i.idx++
return i.idx < i.Len()
@@ -106,7 +106,8 @@ func (oi *oneIterator) advance() {
}
}
-// Next returns true if there is another attribute available.
+// Next moves the iterator to the next position.
+// Next reports whether there is another attribute available.
func (m *MergeIterator) Next() bool {
if m.one.done && m.two.done {
return false
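
The reworded `Next` documentation follows Go's "reports whether" convention; in practice the iterator is driven in the usual loop shape. A minimal sketch with illustrative attribute names:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("service.name", "demo"),
		attribute.Int("retry.count", 2),
	)

	iter := set.Iter()
	for iter.Next() { // Next reports whether another attribute is available
		kv := iter.Attribute()
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```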
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
index d9a22c650..80a9e5643 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/key.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue {
}
}
-// Defined returns true for non-empty keys.
+// Defined reports whether the key is not empty.
func (k Key) Defined() bool {
return len(k) != 0
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go
index 3028f9a40..8c6928ca7 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/kv.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go
@@ -13,7 +13,7 @@ type KeyValue struct {
Value Value
}
-// Valid returns if kv is a valid OpenTelemetry attribute.
+// Valid reports whether kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
return kv.Key.Defined() && kv.Value.Type() != INVALID
}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 6cbefcead..64735d382 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -31,11 +31,11 @@ type (
// Distinct is a unique identifier of a Set.
//
- // Distinct is designed to be ensures equivalence stability: comparisons
- // will return the save value across versions. For this reason, Distinct
- // should always be used as a map key instead of a Set.
+ // Distinct is designed to ensure equivalence stability: comparisons will
+ // return the same value across versions. For this reason, Distinct should
+ // always be used as a map key instead of a Set.
Distinct struct {
- iface interface{}
+ iface any
}
// Sortable implements sort.Interface, used for sorting KeyValue.
@@ -70,7 +70,7 @@ func (d Distinct) reflectValue() reflect.Value {
return reflect.ValueOf(d.iface)
}
-// Valid returns true if this value refers to a valid Set.
+// Valid reports whether this value refers to a valid Set.
func (d Distinct) Valid() bool {
return d.iface != nil
}
@@ -120,7 +120,7 @@ func (l *Set) Value(k Key) (Value, bool) {
return Value{}, false
}
-// HasValue tests whether a key is defined in this set.
+// HasValue reports whether a key is defined in this set.
func (l *Set) HasValue(k Key) bool {
if l == nil {
return false
@@ -155,7 +155,7 @@ func (l *Set) Equivalent() Distinct {
return l.equivalent
}
-// Equals returns true if the argument set is equivalent to this set.
+// Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent()
}
@@ -344,7 +344,7 @@ func computeDistinct(kvs []KeyValue) Distinct {
// computeDistinctFixed computes a Distinct for small slices. It returns nil
// if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) interface{} {
+func computeDistinctFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@@ -373,7 +373,7 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
// computeDistinctReflect computes a Distinct using reflection, works for any
// size input.
-func computeDistinctReflect(kvs []KeyValue) interface{} {
+func computeDistinctReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@@ -387,7 +387,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.
-func (l Set) MarshalLog() interface{} {
+func (l Set) MarshalLog() any {
kvs := make(map[string]string)
for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit()
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
index 817eecacf..653c33a86 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/value.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -22,7 +22,7 @@ type Value struct {
vtype Type
numeric uint64
stringly string
- slice interface{}
+ slice any
}
const (
@@ -199,8 +199,8 @@ func (v Value) asStringSlice() []string {
type unknownValueType struct{}
-// AsInterface returns Value's data as interface{}.
-func (v Value) AsInterface() interface{} {
+// AsInterface returns Value's data as any.
+func (v Value) AsInterface() any {
switch v.Type() {
case BOOL:
return v.AsBool()
@@ -262,7 +262,7 @@ func (v Value) Emit() string {
func (v Value) MarshalJSON() ([]byte, error) {
var jsonVal struct {
Type string
- Value interface{}
+ Value any
}
jsonVal.Type = v.Type().String()
jsonVal.Value = v.AsInterface()
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index 0e1fe2422..f83a448ec 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
// Baggage name is a valid, non-empty UTF-8 string.
func validateBaggageName(s string) bool {
- if len(s) == 0 {
+ if s == "" {
return false
}
@@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool {
// validateKey checks if the string is a valid W3C Baggage key.
func validateKey(s string) bool {
- if len(s) == 0 {
+ if s == "" {
return false
}
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
index 49a35b122..d48847ed8 100644
--- a/vendor/go.opentelemetry.io/otel/codes/codes.go
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return errors.New("nil receiver passed to UnmarshalJSON")
}
- var x interface{}
+ var x any
if err := json.Unmarshal(b, &x); err != nil {
return err
}
@@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) {
if !ok {
return nil, fmt.Errorf("invalid code: %d", *c)
}
- return []byte(fmt.Sprintf("%q", str)), nil
+ return fmt.Appendf(nil, "%q", str), nil
}
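
`fmt.Appendf` (available since Go 1.19) formats directly into a byte slice, avoiding the intermediate string allocation of `[]byte(fmt.Sprintf(...))`. A tiny illustrative comparison:

```go
package main

import "fmt"

func main() {
	status := "Ok"

	older := []byte(fmt.Sprintf("%q", status)) // formats to a string, then copies into a []byte
	newer := fmt.Appendf(nil, "%q", status)    // formats straight into a (possibly nil) []byte

	fmt.Println(string(older) == string(newer)) // true: identical output, one fewer allocation
}
```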
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
index 935bd4876..a311fbb48 100644
--- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
-FROM python:3.13.5-slim-bullseye@sha256:5b9fc0d8ef79cfb5f300e61cb516e0c668067bbf77646762c38c94107e230dbc AS python
-FROM otel/weaver:v0.15.2@sha256:b13acea09f721774daba36344861f689ac4bb8d6ecd94c4600b4d590c8fb34b9 AS weaver
+FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
+FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
index adbca7d34..86d7f4ba0 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -41,22 +41,22 @@ func GetLogger() logr.Logger {
// Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute.
-func Info(msg string, keysAndValues ...interface{}) {
+func Info(msg string, keysAndValues ...any) {
GetLogger().V(4).Info(msg, keysAndValues...)
}
// Error prints messages about exceptional states of the API or SDK.
-func Error(err error, msg string, keysAndValues ...interface{}) {
+func Error(err error, msg string, keysAndValues ...any) {
GetLogger().Error(err, msg, keysAndValues...)
}
// Debug prints messages about all internal changes in the API or SDK.
-func Debug(msg string, keysAndValues ...interface{}) {
+func Debug(msg string, keysAndValues ...any) {
GetLogger().V(8).Info(msg, keysAndValues...)
}
// Warn prints messages about warnings in the API or SDK.
// Not an error but is likely more important than an informational event.
-func Warn(msg string, keysAndValues ...interface{}) {
+func Warn(msg string, keysAndValues ...any) {
GetLogger().V(1).Info(msg, keysAndValues...)
}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
index 49e4ac4fa..bf5cf3119 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/trace.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go
@@ -26,6 +26,7 @@ import (
"sync/atomic"
"go.opentelemetry.io/auto/sdk"
+
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/otel/metric/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
index ebda5026d..051882602 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -20,7 +20,7 @@ type Baggage struct{}
var _ TextMapPropagator = Baggage{}
// Inject sets baggage key-values from ctx into the carrier.
-func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
bStr := baggage.FromContext(ctx).String()
if bStr != "" {
carrier.Set(baggageHeader, bStr)
@@ -30,7 +30,7 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
// Extract returns a copy of parent with the baggage from the carrier added.
// If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked
// for multiple values extraction. Otherwise, Get is called.
-func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
if multiCarrier, ok := carrier.(ValuesGetter); ok {
return extractMultiBaggage(parent, multiCarrier)
}
@@ -38,7 +38,7 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context
}
// Fields returns the keys who's values are set with Inject.
-func (b Baggage) Fields() []string {
+func (Baggage) Fields() []string {
return []string{baggageHeader}
}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
index 5c8c26ea2..0a32c59aa 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/propagation.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -20,7 +20,7 @@ type TextMapCarrier interface {
// must never be done outside of a new major release.
// Set stores the key-value pair.
- Set(key string, value string)
+ Set(key, value string)
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
@@ -88,7 +88,7 @@ func (hc HeaderCarrier) Values(key string) []string {
}
// Set stores the key-value pair.
-func (hc HeaderCarrier) Set(key string, value string) {
+func (hc HeaderCarrier) Set(key, value string) {
http.Header(hc).Set(key, value)
}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 6870e316d..6692d2665 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -36,7 +36,7 @@ var (
)
// Inject injects the trace context from ctx into carrier.
-func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
sc := trace.SpanContextFromContext(ctx)
if !sc.IsValid() {
return
@@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont
return trace.ContextWithRemoteSpanContext(ctx, sc)
}
-func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
h := carrier.Get(traceparentHeader)
if h == "" {
return trace.SpanContext{}
@@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool {
}
// Fields returns the keys who's values are set with Inject.
-func (tc TraceContext) Fields() []string {
+func (TraceContext) Fields() []string {
return []string{traceparentHeader, tracestateHeader}
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
index 68d296cbe..1be472e91 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/internal/x/x.go
@@ -19,7 +19,7 @@ import (
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
- if strings.ToLower(v) == "true" {
+ if strings.EqualFold(v, "true") {
return v, true
}
return "", false
@@ -59,7 +59,7 @@ func (f Feature[T]) Lookup() (v T, ok bool) {
return f.parse(vRaw)
}
-// Enabled returns if the feature is enabled.
+// Enabled reports whether the feature is enabled.
func (f Feature[T]) Enabled() bool {
_, ok := f.Lookup()
return ok
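
`strings.EqualFold` compares case-insensitively without allocating a lowercased copy, which is what the x.go change above relies on. A brief sketch:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, v := range []string{"true", "True", "TRUE", "false", ""} {
		// EqualFold does a case-insensitive comparison in place,
		// unlike strings.ToLower(v) == "true", which allocates a new string.
		fmt.Printf("%-7q enabled=%t\n", v, strings.EqualFold(v, "true"))
	}
}
```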
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
index cefe4ab91..3f20eb7a5 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go
@@ -13,7 +13,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type (
@@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error))
// Detect returns a *Resource that describes the string as a value
// corresponding to attribute.Key as well as the specific schemaURL.
-func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) {
+func (sd stringDetector) Detect(context.Context) (*Resource, error) {
value, err := sd.F()
if err != nil {
return nil, fmt.Errorf("%s: %w", string(sd.K), err)
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
index 0d8619715..bbe142d20 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/container.go
@@ -11,7 +11,7 @@ import (
"os"
"regexp"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type containerIDProvider func() (string, error)
@@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup"
// Detect returns a *Resource that describes the id of the container.
// If no container id found, an empty resource will be returned.
-func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) {
+func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) {
containerID, err := containerID()
if err != nil {
return nil, err
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
index 16a062ad8..4a1b017ee 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -12,7 +12,7 @@ import (
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
const (
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
index 781903923..5fed33d4f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/host_id.go
@@ -8,7 +8,7 @@ import (
"errors"
"strings"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type hostIDProvider func() (string, error)
@@ -96,7 +96,7 @@ func (r *hostIDReaderLinux) read() (string, error) {
type hostIDDetector struct{}
// Detect returns a *Resource containing the platform specific host id.
-func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) {
+func (hostIDDetector) Detect(context.Context) (*Resource, error) {
hostID, err := hostID()
if err != nil {
return nil, err
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
index 01b4d27a0..51da76e80 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go
@@ -8,7 +8,7 @@ import (
"strings"
"go.opentelemetry.io/otel/attribute"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type osDescriptionProvider func() (string, error)
@@ -32,7 +32,7 @@ type (
// Detect returns a *Resource that describes the operating system type the
// service is running on.
-func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
+func (osTypeDetector) Detect(context.Context) (*Resource, error) {
osType := runtimeOS()
osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
@@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
// Detect returns a *Resource that describes the operating system the
// service is running on.
-func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+func (osDescriptionDetector) Detect(context.Context) (*Resource, error) {
description, err := osDescription()
if err != nil {
return nil, err
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
index f537e5ca5..7252af79f 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -63,12 +63,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string {
return values
}
-// skip returns true if the line is blank or starts with a '#' character, and
+// skip reports whether the line is blank or starts with a '#' character, and
// therefore should be skipped from processing.
func skip(line string) bool {
line = strings.TrimSpace(line)
- return len(line) == 0 || strings.HasPrefix(line, "#")
+ return line == "" || strings.HasPrefix(line, "#")
}
// parse attempts to split the provided line on the first '=' character, and then
@@ -76,7 +76,7 @@ func skip(line string) bool {
func parse(line string) (string, string, bool) {
k, v, found := strings.Cut(line, "=")
- if !found || len(k) == 0 {
+ if !found || k == "" {
return "", "", false
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
index 6712ce80d..138e57721 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go
@@ -11,7 +11,7 @@ import (
"path/filepath"
"runtime"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)
type (
@@ -112,19 +112,19 @@ type (
// Detect returns a *Resource that describes the process identifier (PID) of the
// executing process.
-func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processPIDDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
}
// Detect returns a *Resource that describes the name of the process executable.
-func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) {
executableName := filepath.Base(commandArgs()[0])
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
}
// Detect returns a *Resource that describes the full path of the process executable.
-func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) {
executablePath, err := executablePath()
if err != nil {
return nil, err
@@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err
// Detect returns a *Resource that describes all the command arguments as received
// by the process.
-func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
}
// Detect returns a *Resource that describes the username of the user that owns the
// process.
-func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processOwnerDetector) Detect(context.Context) (*Resource, error) {
owner, err := owner()
if err != nil {
return nil, err
@@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
// Detect returns a *Resource that describes the name of the compiler used to compile
// this process image.
-func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
}
// Detect returns a *Resource that describes the version of the runtime of this process.
-func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
}
// Detect returns a *Resource that describes the runtime of this process.
-func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) {
+func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) {
runtimeDescription := fmt.Sprintf(
"go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
index 09b91e1e1..28e1e4f7e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go
@@ -112,7 +112,7 @@ func (r *Resource) String() string {
}
// MarshalLog is the marshaling function used by the logging system to represent this Resource.
-func (r *Resource) MarshalLog() interface{} {
+func (r *Resource) MarshalLog() any {
return struct {
Attributes attribute.Set
SchemaURL string
@@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator {
return r.attrs.Iter()
}
-// Equal returns whether r and o represent the same resource. Two resources can
+// Equal reports whether r and o represent the same resource. Two resources can
// be equal even if they have different schema URLs.
//
// See the documentation on the [Resource] type for the pitfalls of using ==
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
index 6966ed861..9bc3e525d 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -6,24 +6,35 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
"errors"
+ "fmt"
"sync"
"sync/atomic"
"time"
"go.opentelemetry.io/otel"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/internal/env"
+ "go.opentelemetry.io/otel/sdk/trace/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
)
// Defaults for BatchSpanProcessorOptions.
const (
- DefaultMaxQueueSize = 2048
- DefaultScheduleDelay = 5000
+ DefaultMaxQueueSize = 2048
+ // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds.
+ DefaultScheduleDelay = 5000
+ // DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds.
DefaultExportTimeout = 30000
DefaultMaxExportBatchSize = 512
)
+var queueFull = otelconv.ErrorTypeAttr("queue_full")
+
// BatchSpanProcessorOption configures a BatchSpanProcessor.
type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
@@ -67,6 +78,11 @@ type batchSpanProcessor struct {
queue chan ReadOnlySpan
dropped uint32
+ selfObservabilityEnabled bool
+ callbackRegistration metric.Registration
+ spansProcessedCounter otelconv.SDKProcessorSpanProcessed
+ componentNameAttr attribute.KeyValue
+
batch []ReadOnlySpan
batchMutex sync.Mutex
timer *time.Timer
@@ -87,11 +103,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
if maxExportBatchSize > maxQueueSize {
- if DefaultMaxExportBatchSize > maxQueueSize {
- maxExportBatchSize = maxQueueSize
- } else {
- maxExportBatchSize = DefaultMaxExportBatchSize
- }
+ maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize)
}
o := BatchSpanProcessorOptions{
@@ -112,6 +124,21 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
stopCh: make(chan struct{}),
}
+ if x.SelfObservability.Enabled() {
+ bsp.selfObservabilityEnabled = true
+ bsp.componentNameAttr = componentName()
+
+ var err error
+ bsp.spansProcessedCounter, bsp.callbackRegistration, err = newBSPObs(
+ bsp.componentNameAttr,
+ func() int64 { return int64(len(bsp.queue)) },
+ int64(bsp.o.MaxQueueSize),
+ )
+ if err != nil {
+ otel.Handle(err)
+ }
+ }
+
bsp.stopWait.Add(1)
go func() {
defer bsp.stopWait.Done()
@@ -122,8 +149,61 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
return bsp
}
+var processorIDCounter atomic.Int64
+
+// nextProcessorID returns an identifier for this batch span processor,
+// starting with 0 and incrementing by 1 each time it is called.
+func nextProcessorID() int64 {
+ return processorIDCounter.Add(1) - 1
+}
+
+func componentName() attribute.KeyValue {
+ id := nextProcessorID()
+ name := fmt.Sprintf("%s/%d", otelconv.ComponentTypeBatchingSpanProcessor, id)
+ return semconv.OTelComponentName(name)
+}
+
+// newBSPObs creates and returns a new set of metrics instruments and a
+// registration for a BatchSpanProcessor. It is the caller's responsibility
+// to unregister the registration when it is no longer needed.
+func newBSPObs(
+ cmpnt attribute.KeyValue,
+ qLen func() int64,
+ qMax int64,
+) (otelconv.SDKProcessorSpanProcessed, metric.Registration, error) {
+ meter := otel.GetMeterProvider().Meter(
+ selfObsScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ qCap, err := otelconv.NewSDKProcessorSpanQueueCapacity(meter)
+
+ qSize, e := otelconv.NewSDKProcessorSpanQueueSize(meter)
+ err = errors.Join(err, e)
+
+ spansProcessed, e := otelconv.NewSDKProcessorSpanProcessed(meter)
+ err = errors.Join(err, e)
+
+ cmpntT := semconv.OTelComponentTypeBatchingSpanProcessor
+ attrs := metric.WithAttributes(cmpnt, cmpntT)
+
+ reg, e := meter.RegisterCallback(
+ func(_ context.Context, o metric.Observer) error {
+ o.ObserveInt64(qSize.Inst(), qLen(), attrs)
+ o.ObserveInt64(qCap.Inst(), qMax, attrs)
+ return nil
+ },
+ qSize.Inst(),
+ qCap.Inst(),
+ )
+ err = errors.Join(err, e)
+
+ return spansProcessed, reg, err
+}
+
// OnStart method does nothing.
-func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
// OnEnd method enqueues a ReadOnlySpan for later processing.
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
@@ -162,6 +242,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
case <-ctx.Done():
err = ctx.Err()
}
+ if bsp.selfObservabilityEnabled {
+ err = errors.Join(err, bsp.callbackRegistration.Unregister())
+ }
})
return err
}
@@ -171,7 +254,7 @@ type forceFlushSpan struct {
flushed chan struct{}
}
-func (f forceFlushSpan) SpanContext() trace.SpanContext {
+func (forceFlushSpan) SpanContext() trace.SpanContext {
return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
}
@@ -274,6 +357,11 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
if l := len(bsp.batch); l > 0 {
global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, int64(l),
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor))
+ }
err := bsp.e.ExportSpans(ctx, bsp.batch)
// A new batch is always created after exporting, even if the batch failed to be exported.
@@ -382,11 +470,17 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
case bsp.queue <- sd:
return true
case <-ctx.Done():
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, 1,
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
+ bsp.spansProcessedCounter.AttrErrorType(queueFull))
+ }
return false
}
}
-func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool {
+func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
if !sd.SpanContext().IsSampled() {
return false
}
@@ -396,12 +490,18 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b
return true
default:
atomic.AddUint32(&bsp.dropped, 1)
+ if bsp.selfObservabilityEnabled {
+ bsp.spansProcessedCounter.Add(ctx, 1,
+ bsp.componentNameAttr,
+ bsp.spansProcessedCounter.AttrComponentType(otelconv.ComponentTypeBatchingSpanProcessor),
+ bsp.spansProcessedCounter.AttrErrorType(queueFull))
+ }
}
return false
}
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
-func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+func (bsp *batchSpanProcessor) MarshalLog() any {
return struct {
Type string
SpanExporter SpanExporter
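
Editor's note (not part of the patch): the hunks above document the millisecond-based defaults and clamp the environment-derived export batch size to `min(DefaultMaxExportBatchSize, maxQueueSize)`. A minimal sketch of the equivalent programmatic configuration, assuming a stand-in `noopExporter` that is not part of this change:

```go
package main

import (
	"context"
	"time"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// noopExporter is a stand-in SpanExporter used only for illustration.
type noopExporter struct{}

func (noopExporter) ExportSpans(context.Context, []sdktrace.ReadOnlySpan) error { return nil }
func (noopExporter) Shutdown(context.Context) error                             { return nil }

func main() {
	bsp := sdktrace.NewBatchSpanProcessor(noopExporter{},
		sdktrace.WithMaxQueueSize(2048),            // DefaultMaxQueueSize
		sdktrace.WithMaxExportBatchSize(512),       // DefaultMaxExportBatchSize
		sdktrace.WithBatchTimeout(5*time.Second),   // DefaultScheduleDelay (5000 ms)
		sdktrace.WithExportTimeout(30*time.Second), // DefaultExportTimeout (30000 ms)
	)
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(bsp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
}
```
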
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
index 1f60524e3..e58e7f6ed 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go
@@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing.
The following assumes a basic familiarity with OpenTelemetry concepts.
See https://opentelemetry.io.
+
+See [go.opentelemetry.io/otel/sdk/trace/internal/x] for information about
+the experimental features.
*/
package trace // import "go.opentelemetry.io/otel/sdk/trace"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
index c8d3fb7e3..3649322a6 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go
@@ -32,7 +32,7 @@ type randomIDGenerator struct{}
var _ IDGenerator = &randomIDGenerator{}
// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
-func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID {
sid := trace.SpanID{}
for {
binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
@@ -45,7 +45,7 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace
// NewIDs returns a non-zero trace ID and a non-zero span ID from a
// randomly-chosen sequence.
-func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) {
tid := trace.TraceID{}
sid := trace.SpanID{}
for {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
new file mode 100644
index 000000000..feec16fa6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/README.md
@@ -0,0 +1,35 @@
+# Experimental Features
+
+The Trace SDK contains features that have not yet stabilized in the OpenTelemetry specification.
+These features are added to the OpenTelemetry Go Trace SDK prior to stabilization in the specification so that users can start experimenting with them and provide feedback.
+
+These features may change in backwards incompatible ways as feedback is applied.
+See the [Compatibility and Stability](#compatibility-and-stability) section for more information.
+
+## Features
+
+- [Self-Observability](#self-observability)
+
+### Self-Observability
+
+The SDK provides a self-observability feature that allows you to monitor the SDK itself.
+
+To opt-in, set the environment variable `OTEL_GO_X_SELF_OBSERVABILITY` to `true`.
+
+When enabled, the SDK will create the following metrics using the global `MeterProvider`:
+
+- `otel.sdk.span.live`
+- `otel.sdk.span.started`
+
+Please see the [Semantic conventions for OpenTelemetry SDK metrics] documentation for more details on these metrics.
+
+[Semantic conventions for OpenTelemetry SDK metrics]: https://github.com/open-telemetry/semantic-conventions/blob/v1.36.0/docs/otel/sdk-metrics.md
+
+## Compatibility and Stability
+
+Experimental features do not fall within the scope of the OpenTelemetry Go versioning and stability [policy](../../../../VERSIONING.md).
+These features may be removed or modified in successive version releases, including patch versions.
+
+When an experimental feature is promoted to a stable feature, a migration path will be included in the changelog entry of the release.
+There is no guarantee that any environment variable feature flags that enabled the experimental feature will be supported by the stable version.
+If they are supported, they may be accompanied with a deprecation notice stating a timeline for the removal of that support.
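
Editor's note (not part of the patch): the opt-in described in the README above can be exercised end to end with a small sketch. It assumes the public `go.opentelemetry.io/otel/sdk/metric` ManualReader is registered as the global MeterProvider, since the self-observability instruments are created against whichever global MeterProvider is set when a tracer is created:

```go
package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel"
	sdkmetric "go.opentelemetry.io/otel/sdk/metric"
	"go.opentelemetry.io/otel/sdk/metric/metricdata"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Opt in before any tracer is created; the flag is read when Tracer() is called.
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "true")

	// The self-observability instruments use the global MeterProvider,
	// so register one that can be read back.
	reader := sdkmetric.NewManualReader()
	otel.SetMeterProvider(sdkmetric.NewMeterProvider(sdkmetric.WithReader(reader)))

	tp := sdktrace.NewTracerProvider()
	defer func() { _ = tp.Shutdown(context.Background()) }()

	_, span := tp.Tracer("example").Start(context.Background(), "work")
	span.End()

	// Collect and print the SDK's own metrics (otel.sdk.span.started, otel.sdk.span.live, ...).
	var rm metricdata.ResourceMetrics
	_ = reader.Collect(context.Background(), &rm)
	for _, sm := range rm.ScopeMetrics {
		for _, m := range sm.Metrics {
			fmt.Println(m.Name)
		}
	}
}
```
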
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go
new file mode 100644
index 000000000..2fcbbcc66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/internal/x/x.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package x documents experimental features for [go.opentelemetry.io/otel/sdk/trace].
+package x // import "go.opentelemetry.io/otel/sdk/trace/internal/x"
+
+import (
+ "os"
+ "strings"
+)
+
+// SelfObservability is an experimental feature flag that determines if SDK
+// self-observability metrics are enabled.
+//
+// To enable this feature set the OTEL_GO_X_SELF_OBSERVABILITY environment variable
+// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
+// will also enable this).
+var SelfObservability = newFeature("SELF_OBSERVABILITY", func(v string) (string, bool) {
+ if strings.EqualFold(v, "true") {
+ return v, true
+ }
+ return "", false
+})
+
+// Feature is an experimental feature control flag. It provides a uniform way
+// to interact with these feature flags and parse their values.
+type Feature[T any] struct {
+ key string
+ parse func(v string) (T, bool)
+}
+
+func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] {
+ const envKeyRoot = "OTEL_GO_X_"
+ return Feature[T]{
+ key: envKeyRoot + suffix,
+ parse: parse,
+ }
+}
+
+// Key returns the environment variable key that needs to be set to enable the
+// feature.
+func (f Feature[T]) Key() string { return f.key }
+
+// Lookup returns the user configured value for the feature and true if the
+// user has enabled the feature. Otherwise, if the feature is not enabled, a
+// zero-value and false are returned.
+func (f Feature[T]) Lookup() (v T, ok bool) {
+ // https://github.com/open-telemetry/opentelemetry-specification/blob/62effed618589a0bec416a87e559c0a9d96289bb/specification/configuration/sdk-environment-variables.md#parsing-empty-value
+ //
+ // > The SDK MUST interpret an empty value of an environment variable the
+ // > same way as when the variable is unset.
+ vRaw := os.Getenv(f.key)
+ if vRaw == "" {
+ return v, ok
+ }
+ return f.parse(vRaw)
+}
+
+// Enabled reports whether the feature is enabled.
+func (f Feature[T]) Enabled() bool {
+ _, ok := f.Lookup()
+ return ok
+}
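
Editor's note (not part of the patch): package `x` is internal and cannot be imported by users, but its lookup semantics are easy to mirror. The hypothetical helper below copies the behavior of `x.SelfObservability.Enabled()` for illustration: an unset or empty variable counts as disabled, and only a case-insensitive "true" enables the feature.

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// isSelfObsEnabled mirrors x.SelfObservability.Enabled(): empty or unset
// means disabled; only a case-insensitive "true" enables the feature.
func isSelfObsEnabled() bool {
	v := os.Getenv("OTEL_GO_X_SELF_OBSERVABILITY")
	if v == "" {
		return false
	}
	return strings.EqualFold(v, "true")
}

func main() {
	fmt.Println(isSelfObsEnabled()) // false: variable is unset
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "True")
	fmt.Println(isSelfObsEnabled()) // true: matching is case-insensitive
	os.Setenv("OTEL_GO_X_SELF_OBSERVABILITY", "1")
	fmt.Println(isSelfObsEnabled()) // false: "1" is not accepted
}
```
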
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
index 0e2a2e7c6..37ce2ac87 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -5,14 +5,20 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import (
"context"
+ "errors"
"fmt"
"sync"
"sync/atomic"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/sdk"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/sdk/trace/internal/x"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/noop"
@@ -20,6 +26,7 @@ import (
const (
defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+ selfObsScopeName = "go.opentelemetry.io/otel/sdk/trace"
)
// tracerProviderConfig.
@@ -45,7 +52,7 @@ type tracerProviderConfig struct {
}
// MarshalLog is the marshaling function used by the logging system to represent this Provider.
-func (cfg tracerProviderConfig) MarshalLog() interface{} {
+func (cfg tracerProviderConfig) MarshalLog() any {
return struct {
SpanProcessors []SpanProcessor
SamplerType string
@@ -156,8 +163,18 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
t, ok := p.namedTracer[is]
if !ok {
t = &tracer{
- provider: p,
- instrumentationScope: is,
+ provider: p,
+ instrumentationScope: is,
+ selfObservabilityEnabled: x.SelfObservability.Enabled(),
+ }
+ if t.selfObservabilityEnabled {
+ var err error
+ t.spanLiveMetric, t.spanStartedMetric, err = newInst()
+ if err != nil {
+ msg := "failed to create self-observability metrics for tracer: %w"
+ err := fmt.Errorf(msg, err)
+ otel.Handle(err)
+ }
}
p.namedTracer[is] = t
}
@@ -184,6 +201,23 @@ func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.T
return t
}
+func newInst() (otelconv.SDKSpanLive, otelconv.SDKSpanStarted, error) {
+ m := otel.GetMeterProvider().Meter(
+ selfObsScopeName,
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ var err error
+ spanLiveMetric, e := otelconv.NewSDKSpanLive(m)
+ err = errors.Join(err, e)
+
+ spanStartedMetric, e := otelconv.NewSDKSpanStarted(m)
+ err = errors.Join(err, e)
+
+ return spanLiveMetric, spanStartedMetric, err
+}
+
// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors.
func (p *TracerProvider) RegisterSpanProcessor(sp SpanProcessor) {
// This check prevents calls during a shutdown.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
index aa7b262d0..689663d48 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -110,14 +110,14 @@ func TraceIDRatioBased(fraction float64) Sampler {
type alwaysOnSampler struct{}
-func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+func (alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
return SamplingResult{
Decision: RecordAndSample,
Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
}
}
-func (as alwaysOnSampler) Description() string {
+func (alwaysOnSampler) Description() string {
return "AlwaysOnSampler"
}
@@ -131,14 +131,14 @@ func AlwaysSample() Sampler {
type alwaysOffSampler struct{}
-func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+func (alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
return SamplingResult{
Decision: Drop,
Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
}
}
-func (as alwaysOffSampler) Description() string {
+func (alwaysOffSampler) Description() string {
return "AlwaysOffSampler"
}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
index 664e13e03..411d9ccdd 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -39,7 +39,7 @@ func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
}
// OnStart does nothing.
-func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+func (*simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
// OnEnd immediately exports a ReadOnlySpan.
func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
@@ -104,13 +104,13 @@ func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
}
// ForceFlush does nothing as there is no data to flush.
-func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+func (*simpleSpanProcessor) ForceFlush(context.Context) error {
return nil
}
// MarshalLog is the marshaling function used by the logging system to represent
// this Span Processor.
-func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+func (ssp *simpleSpanProcessor) MarshalLog() any {
return struct {
Type string
Exporter SpanExporter
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
index d511d0f27..63aa33780 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -35,7 +35,7 @@ type snapshot struct {
var _ ReadOnlySpan = snapshot{}
-func (s snapshot) private() {}
+func (snapshot) private() {}
// Name returns the name of the span.
func (s snapshot) Name() string {
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
index 1785a4bbb..b376051fb 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/instrumentation"
"go.opentelemetry.io/otel/sdk/resource"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -61,6 +61,7 @@ type ReadOnlySpan interface {
InstrumentationScope() instrumentation.Scope
// InstrumentationLibrary returns information about the instrumentation
// library that created the span.
+ //
// Deprecated: please use InstrumentationScope instead.
InstrumentationLibrary() instrumentation.Library //nolint:staticcheck // This method needs to be define for backwards compatibility
// Resource returns information about the entity that produced the span.
@@ -165,7 +166,7 @@ func (s *recordingSpan) SpanContext() trace.SpanContext {
return s.spanContext
}
-// IsRecording returns if this span is being recorded. If this span has ended
+// IsRecording reports whether this span is being recorded. If this span has ended
// this will return false.
func (s *recordingSpan) IsRecording() bool {
if s == nil {
@@ -177,7 +178,7 @@ func (s *recordingSpan) IsRecording() bool {
return s.isRecording()
}
-// isRecording returns if this span is being recorded. If this span has ended
+// isRecording reports whether this span is being recorded. If this span has ended
// this will return false.
//
// This method assumes s.mu.Lock is held by the caller.
@@ -495,6 +496,16 @@ func (s *recordingSpan) End(options ...trace.SpanEndOption) {
}
s.mu.Unlock()
+ if s.tracer.selfObservabilityEnabled {
+ defer func() {
+ // Add the span to the context to ensure the metric is recorded
+ // with the correct span context.
+ ctx := trace.ContextWithSpan(context.Background(), s)
+ set := spanLiveSet(s.spanContext.IsSampled())
+ s.tracer.spanLiveMetric.AddSet(ctx, -1, set)
+ }()
+ }
+
sps := s.tracer.provider.getSpanProcessors()
if len(sps) == 0 {
return
@@ -545,7 +556,7 @@ func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
s.addEvent(semconv.ExceptionEventName, opts...)
}
-func typeStr(i interface{}) string {
+func typeStr(i any) string {
t := reflect.TypeOf(i)
if t.PkgPath() == "" && t.Name() == "" {
// Likely a builtin type.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
index 0b65ae9ab..e965c4cce 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -7,7 +7,9 @@ import (
"context"
"time"
+ "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/embedded"
)
@@ -17,6 +19,10 @@ type tracer struct {
provider *TracerProvider
instrumentationScope instrumentation.Scope
+
+ selfObservabilityEnabled bool
+ spanLiveMetric otelconv.SDKSpanLive
+ spanStartedMetric otelconv.SDKSpanStarted
}
var _ trace.Tracer = &tracer{}
@@ -46,17 +52,25 @@ func (tr *tracer) Start(
}
s := tr.newSpan(ctx, name, &config)
+ newCtx := trace.ContextWithSpan(ctx, s)
+ if tr.selfObservabilityEnabled {
+ psc := trace.SpanContextFromContext(ctx)
+ set := spanStartedSet(psc, s)
+ tr.spanStartedMetric.AddSet(newCtx, 1, set)
+ }
+
if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
sps := tr.provider.getSpanProcessors()
for _, sp := range sps {
+ // Use original context.
sp.sp.OnStart(ctx, rw)
}
}
if rtt, ok := s.(runtimeTracer); ok {
- ctx = rtt.runtimeTrace(ctx)
+ newCtx = rtt.runtimeTrace(newCtx)
}
- return trace.ContextWithSpan(ctx, s), s
+ return newCtx, s
}
type runtimeTracer interface {
@@ -112,11 +126,12 @@ func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanCo
if !isRecording(samplingResult) {
return tr.newNonRecordingSpan(sc)
}
- return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+ return tr.newRecordingSpan(ctx, psc, sc, name, samplingResult, config)
}
// newRecordingSpan returns a new configured recordingSpan.
func (tr *tracer) newRecordingSpan(
+ ctx context.Context,
psc, sc trace.SpanContext,
name string,
sr SamplingResult,
@@ -153,6 +168,14 @@ func (tr *tracer) newRecordingSpan(
s.SetAttributes(sr.Attributes...)
s.SetAttributes(config.Attributes()...)
+ if tr.selfObservabilityEnabled {
+ // Propagate any existing values from the context with the new span to
+ // the measurement context.
+ ctx = trace.ContextWithSpan(ctx, s)
+ set := spanLiveSet(s.spanContext.IsSampled())
+ tr.spanLiveMetric.AddSet(ctx, 1, set)
+ }
+
return s
}
@@ -160,3 +183,112 @@ func (tr *tracer) newRecordingSpan(
func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan {
return nonRecordingSpan{tracer: tr, sc: sc}
}
+
+type parentState int
+
+const (
+ parentStateNoParent parentState = iota
+ parentStateLocalParent
+ parentStateRemoteParent
+)
+
+type samplingState int
+
+const (
+ samplingStateDrop samplingState = iota
+ samplingStateRecordOnly
+ samplingStateRecordAndSample
+)
+
+type spanStartedSetKey struct {
+ parent parentState
+ sampling samplingState
+}
+
+var spanStartedSetCache = map[spanStartedSetKey]attribute.Set{
+ {parentStateNoParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+ {parentStateLocalParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+ {parentStateRemoteParent, samplingStateDrop}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultDrop),
+ ),
+
+ {parentStateNoParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+ {parentStateLocalParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+ {parentStateRemoteParent, samplingStateRecordOnly}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordOnly),
+ ),
+
+ {parentStateNoParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginNone),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+ {parentStateLocalParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginLocal),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+ {parentStateRemoteParent, samplingStateRecordAndSample}: attribute.NewSet(
+ otelconv.SDKSpanStarted{}.AttrSpanParentOrigin(otelconv.SpanParentOriginRemote),
+ otelconv.SDKSpanStarted{}.AttrSpanSamplingResult(otelconv.SpanSamplingResultRecordAndSample),
+ ),
+}
+
+func spanStartedSet(psc trace.SpanContext, span trace.Span) attribute.Set {
+ key := spanStartedSetKey{
+ parent: parentStateNoParent,
+ sampling: samplingStateDrop,
+ }
+
+ if psc.IsValid() {
+ if psc.IsRemote() {
+ key.parent = parentStateRemoteParent
+ } else {
+ key.parent = parentStateLocalParent
+ }
+ }
+
+ if span.IsRecording() {
+ if span.SpanContext().IsSampled() {
+ key.sampling = samplingStateRecordAndSample
+ } else {
+ key.sampling = samplingStateRecordOnly
+ }
+ }
+
+ return spanStartedSetCache[key]
+}
+
+type spanLiveSetKey struct {
+ sampled bool
+}
+
+var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
+ {true}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordAndSample,
+ ),
+ ),
+ {false}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordOnly,
+ ),
+ ),
+}
+
+func spanLiveSet(sampled bool) attribute.Set {
+ key := spanLiveSetKey{sampled: sampled}
+ return spanLiveSetCache[key]
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go b/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
deleted file mode 100644
index b84dd2c5e..000000000
--- a/vendor/go.opentelemetry.io/otel/sdk/trace/version.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-package trace // import "go.opentelemetry.io/otel/sdk/trace"
-
-// version is the current release version of the metric SDK in use.
-func version() string {
- return "1.16.0-rc.1"
-}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/version.go b/vendor/go.opentelemetry.io/otel/sdk/version.go
index c0217af6b..7f97cc31e 100644
--- a/vendor/go.opentelemetry.io/otel/sdk/version.go
+++ b/vendor/go.opentelemetry.io/otel/sdk/version.go
@@ -6,5 +6,5 @@ package sdk // import "go.opentelemetry.io/otel/sdk"
// Version is the current release version of the OpenTelemetry SDK in use.
func Version() string {
- return "1.37.0"
+ return "1.38.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
deleted file mode 100644
index 2de1fc3c6..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# Semconv v1.26.0
-
-[](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.26.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
deleted file mode 100644
index d8dc822b2..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/attribute_group.go
+++ /dev/null
@@ -1,8996 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-import "go.opentelemetry.io/otel/attribute"
-
-// The Android platform on which the Android application is running.
-const (
- // AndroidOSAPILevelKey is the attribute Key conforming to the
- // "android.os.api_level" semantic conventions. It represents the uniquely
- // identifies the framework API revision offered by a version
- // (`os.version`) of the android operating system. More information can be
- // found
- // [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '33', '32'
- AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
-)
-
-// AndroidOSAPILevel returns an attribute KeyValue conforming to the
-// "android.os.api_level" semantic conventions. It represents the uniquely
-// identifies the framework API revision offered by a version (`os.version`) of
-// the android operating system. More information can be found
-// [here](https://developer.android.com/guide/topics/manifest/uses-sdk-element#APILevels).
-func AndroidOSAPILevel(val string) attribute.KeyValue {
- return AndroidOSAPILevelKey.String(val)
-}
-
-// ASP.NET Core attributes
-const (
- // AspnetcoreRateLimitingResultKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.result" semantic conventions. It represents
- // the rate-limiting result, shows whether the lease was acquired or
- // contains a rejection reason
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'acquired', 'request_canceled'
- AspnetcoreRateLimitingResultKey = attribute.Key("aspnetcore.rate_limiting.result")
-
- // AspnetcoreDiagnosticsHandlerTypeKey is the attribute Key conforming to
- // the "aspnetcore.diagnostics.handler.type" semantic conventions. It
- // represents the full type name of the
- // [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
- // implementation that handled the exception.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (if and only if the exception
- // was handled by this handler.)
- // Stability: stable
- // Examples: 'Contoso.MyHandler'
- AspnetcoreDiagnosticsHandlerTypeKey = attribute.Key("aspnetcore.diagnostics.handler.type")
-
- // AspnetcoreDiagnosticsExceptionResultKey is the attribute Key conforming
- // to the "aspnetcore.diagnostics.exception.result" semantic conventions.
- // It represents the aSP.NET Core exception middleware handling result
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'handled', 'unhandled'
- AspnetcoreDiagnosticsExceptionResultKey = attribute.Key("aspnetcore.diagnostics.exception.result")
-
- // AspnetcoreRateLimitingPolicyKey is the attribute Key conforming to the
- // "aspnetcore.rate_limiting.policy" semantic conventions. It represents
- // the rate limiting policy name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'fixed', 'sliding', 'token'
- AspnetcoreRateLimitingPolicyKey = attribute.Key("aspnetcore.rate_limiting.policy")
-
- // AspnetcoreRequestIsUnhandledKey is the attribute Key conforming to the
- // "aspnetcore.request.is_unhandled" semantic conventions. It represents
- // the flag indicating if request was handled by the application pipeline.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRequestIsUnhandledKey = attribute.Key("aspnetcore.request.is_unhandled")
-
- // AspnetcoreRoutingIsFallbackKey is the attribute Key conforming to the
- // "aspnetcore.routing.is_fallback" semantic conventions. It represents a
- // value that indicates whether the matched route is a fallback route.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: True
- AspnetcoreRoutingIsFallbackKey = attribute.Key("aspnetcore.routing.is_fallback")
-
- // AspnetcoreRoutingMatchStatusKey is the attribute Key conforming to the
- // "aspnetcore.routing.match_status" semantic conventions. It represents
- // the match result - success or failure
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'success', 'failure'
- AspnetcoreRoutingMatchStatusKey = attribute.Key("aspnetcore.routing.match_status")
-)
-
-var (
- // Lease was acquired
- AspnetcoreRateLimitingResultAcquired = AspnetcoreRateLimitingResultKey.String("acquired")
- // Lease request was rejected by the endpoint limiter
- AspnetcoreRateLimitingResultEndpointLimiter = AspnetcoreRateLimitingResultKey.String("endpoint_limiter")
- // Lease request was rejected by the global limiter
- AspnetcoreRateLimitingResultGlobalLimiter = AspnetcoreRateLimitingResultKey.String("global_limiter")
- // Lease request was canceled
- AspnetcoreRateLimitingResultRequestCanceled = AspnetcoreRateLimitingResultKey.String("request_canceled")
-)
-
-var (
- // Exception was handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultHandled = AspnetcoreDiagnosticsExceptionResultKey.String("handled")
- // Exception was not handled by the exception handling middleware
- AspnetcoreDiagnosticsExceptionResultUnhandled = AspnetcoreDiagnosticsExceptionResultKey.String("unhandled")
- // Exception handling was skipped because the response had started
- AspnetcoreDiagnosticsExceptionResultSkipped = AspnetcoreDiagnosticsExceptionResultKey.String("skipped")
- // Exception handling didn't run because the request was aborted
- AspnetcoreDiagnosticsExceptionResultAborted = AspnetcoreDiagnosticsExceptionResultKey.String("aborted")
-)
-
-var (
- // Match succeeded
- AspnetcoreRoutingMatchStatusSuccess = AspnetcoreRoutingMatchStatusKey.String("success")
- // Match failed
- AspnetcoreRoutingMatchStatusFailure = AspnetcoreRoutingMatchStatusKey.String("failure")
-)
-
-// AspnetcoreDiagnosticsHandlerType returns an attribute KeyValue conforming
-// to the "aspnetcore.diagnostics.handler.type" semantic conventions. It
-// represents the full type name of the
-// [`IExceptionHandler`](https://learn.microsoft.com/dotnet/api/microsoft.aspnetcore.diagnostics.iexceptionhandler)
-// implementation that handled the exception.
-func AspnetcoreDiagnosticsHandlerType(val string) attribute.KeyValue {
- return AspnetcoreDiagnosticsHandlerTypeKey.String(val)
-}
-
-// AspnetcoreRateLimitingPolicy returns an attribute KeyValue conforming to
-// the "aspnetcore.rate_limiting.policy" semantic conventions. It represents
-// the rate limiting policy name.
-func AspnetcoreRateLimitingPolicy(val string) attribute.KeyValue {
- return AspnetcoreRateLimitingPolicyKey.String(val)
-}
-
-// AspnetcoreRequestIsUnhandled returns an attribute KeyValue conforming to
-// the "aspnetcore.request.is_unhandled" semantic conventions. It represents
-// the flag indicating if request was handled by the application pipeline.
-func AspnetcoreRequestIsUnhandled(val bool) attribute.KeyValue {
- return AspnetcoreRequestIsUnhandledKey.Bool(val)
-}
-
-// AspnetcoreRoutingIsFallback returns an attribute KeyValue conforming to
-// the "aspnetcore.routing.is_fallback" semantic conventions. It represents a
-// value that indicates whether the matched route is a fallback route.
-func AspnetcoreRoutingIsFallback(val bool) attribute.KeyValue {
- return AspnetcoreRoutingIsFallbackKey.Bool(val)
-}
-
-// Generic attributes for AWS services.
-const (
- // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
- // semantic conventions. It represents the AWS request ID as returned in
- // the response headers `x-amz-request-id` or `x-amz-requestid`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '79b9da39-b7ae-508a-a6bc-864b2829c622', 'C9ER4AJX75574TDJ'
- AWSRequestIDKey = attribute.Key("aws.request_id")
-)
-
-// AWSRequestID returns an attribute KeyValue conforming to the
-// "aws.request_id" semantic conventions. It represents the AWS request ID as
-// returned in the response headers `x-amz-request-id` or `x-amz-requestid`.
-func AWSRequestID(val string) attribute.KeyValue {
- return AWSRequestIDKey.String(val)
-}
-
-// Attributes for AWS DynamoDB.
-const (
- // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to
- // the "aws.dynamodb.attribute_definitions" semantic conventions. It
- // represents the JSON-serialized value of each item in the
- // `AttributeDefinitions` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "AttributeName": "string", "AttributeType": "string" }'
- AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
-
- // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
- // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
- // value of the `AttributesToGet` request parameter.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'lives', 'id'
- AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
-
- // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
- // "aws.dynamodb.consistent_read" semantic conventions. It represents the
- // value of the `ConsistentRead` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
-
- // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
- // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
- // JSON-serialized value of each item in the `ConsumedCapacity` response
- // field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": {
- // "string" : { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "LocalSecondaryIndexes": { "string" :
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }, "ReadCapacityUnits": number, "Table":
- // { "CapacityUnits": number, "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number }, "TableName": "string",
- // "WriteCapacityUnits": number }'
- AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
-
- // AWSDynamoDBCountKey is the attribute Key conforming to the
- // "aws.dynamodb.count" semantic conventions. It represents the value of
- // the `Count` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
-
- // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
- // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents
- // the value of the `ExclusiveStartTableName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'CatsTable'
- AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
-
- // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key
- // conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
- // conventions. It represents the JSON-serialized value of each item in the
- // `GlobalSecondaryIndexUpdates` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
- // "ProvisionedThroughput": { "ReadCapacityUnits": number,
- // "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
-
- // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `GlobalSecondaryIndexes` request field
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName":
- // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
- // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
- // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }'
- AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
-
- // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
- // "aws.dynamodb.index_name" semantic conventions. It represents the value
- // of the `IndexName` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'name_to_group'
- AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
-
- // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to
- // the "aws.dynamodb.item_collection_metrics" semantic conventions. It
- // represents the JSON-serialized value of the `ItemCollectionMetrics`
- // response field.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B":
- // blob, "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": {
- // "string" : "AttributeValue" }, "N": "string", "NS": [ "string" ],
- // "NULL": boolean, "S": "string", "SS": [ "string" ] } },
- // "SizeEstimateRangeGB": [ number ] } ] }'
- AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
-
- // AWSDynamoDBLimitKey is the attribute Key conforming to the
- // "aws.dynamodb.limit" semantic conventions. It represents the value of
- // the `Limit` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
-
- // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to
- // the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
- // represents the JSON-serialized value of each item of the
- // `LocalSecondaryIndexes` request field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '{ "IndexARN": "string", "IndexName": "string",
- // "IndexSizeBytes": number, "ItemCount": number, "KeySchema": [ {
- // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
- // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" } }'
- AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
-
- // AWSDynamoDBProjectionKey is the attribute Key conforming to the
- // "aws.dynamodb.projection" semantic conventions. It represents the value
- // of the `ProjectionExpression` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Title', 'Title, Price, Color', 'Title, Description,
- // RelatedItems, ProductReviews'
- AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
-
- // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to
- // the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
- // represents the value of the `ProvisionedThroughput.ReadCapacityUnits`
- // request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
-
- // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming
- // to the "aws.dynamodb.provisioned_write_capacity" semantic conventions.
- // It represents the value of the
- // `ProvisionedThroughput.WriteCapacityUnits` request parameter.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0, 2.0
- AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
-
- // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
- // "aws.dynamodb.scan_forward" semantic conventions. It represents the
- // value of the `ScanIndexForward` request parameter.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
-
- // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
- // "aws.dynamodb.scanned_count" semantic conventions. It represents the
- // value of the `ScannedCount` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 50
- AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
-
- // AWSDynamoDBSegmentKey is the attribute Key conforming to the
- // "aws.dynamodb.segment" semantic conventions. It represents the value of
- // the `Segment` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
-
- // AWSDynamoDBSelectKey is the attribute Key conforming to the
- // "aws.dynamodb.select" semantic conventions. It represents the value of
- // the `Select` request parameter.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ALL_ATTRIBUTES', 'COUNT'
- AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
-
- // AWSDynamoDBTableCountKey is the attribute Key conforming to the
- // "aws.dynamodb.table_count" semantic conventions. It represents the
- // number of items in the `TableNames` response parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 20
- AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
-
- // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
- // "aws.dynamodb.table_names" semantic conventions. It represents the keys
- // in the `RequestItems` object field.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Users', 'Cats'
- AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
-
- // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
- // "aws.dynamodb.total_segments" semantic conventions. It represents the
- // value of the `TotalSegments` request parameter.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
-)
-
-// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming
-// to the "aws.dynamodb.attribute_definitions" semantic conventions. It
-// represents the JSON-serialized value of each item in the
-// `AttributeDefinitions` request field.
-func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
-}
-
-// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to
-// the "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
-// value of the `AttributesToGet` request parameter.
-func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
- return AWSDynamoDBAttributesToGetKey.StringSlice(val)
-}
-
-// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
-// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
-// of the `ConsistentRead` request parameter.
-func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
- return AWSDynamoDBConsistentReadKey.Bool(val)
-}
-
-// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to
-// the "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
-// JSON-serialized value of each item in the `ConsumedCapacity` response field.
-func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
- return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
-}
-
-// AWSDynamoDBCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.count" semantic conventions. It represents the value of the
-// `Count` response parameter.
-func AWSDynamoDBCount(val int) attribute.KeyValue {
- return AWSDynamoDBCountKey.Int(val)
-}
-
-// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming
-// to the "aws.dynamodb.exclusive_start_table" semantic conventions. It
-// represents the value of the `ExclusiveStartTableName` request parameter.
-func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
- return AWSDynamoDBExclusiveStartTableKey.String(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
-// conventions. It represents the JSON-serialized value of each item in the
-// `GlobalSecondaryIndexUpdates` request field.
-func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
-}
-
-// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue
-// conforming to the "aws.dynamodb.global_secondary_indexes" semantic
-// conventions. It represents the JSON-serialized value of each item of the
-// `GlobalSecondaryIndexes` request field
-func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
-// "aws.dynamodb.index_name" semantic conventions. It represents the value of
-// the `IndexName` request parameter.
-func AWSDynamoDBIndexName(val string) attribute.KeyValue {
- return AWSDynamoDBIndexNameKey.String(val)
-}
-
-// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming
-// to the "aws.dynamodb.item_collection_metrics" semantic conventions. It
-// represents the JSON-serialized value of the `ItemCollectionMetrics` response
-// field.
-func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
- return AWSDynamoDBItemCollectionMetricsKey.String(val)
-}
-
-// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
-// "aws.dynamodb.limit" semantic conventions. It represents the value of the
-// `Limit` request parameter.
-func AWSDynamoDBLimit(val int) attribute.KeyValue {
- return AWSDynamoDBLimitKey.Int(val)
-}
-
-// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming
-// to the "aws.dynamodb.local_secondary_indexes" semantic conventions. It
-// represents the JSON-serialized value of each item of the
-// `LocalSecondaryIndexes` request field.
-func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
- return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
-}
-
-// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
-// "aws.dynamodb.projection" semantic conventions. It represents the value of
-// the `ProjectionExpression` request parameter.
-func AWSDynamoDBProjection(val string) attribute.KeyValue {
- return AWSDynamoDBProjectionKey.String(val)
-}
-
-// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_read_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.ReadCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue
-// conforming to the "aws.dynamodb.provisioned_write_capacity" semantic
-// conventions. It represents the value of the
-// `ProvisionedThroughput.WriteCapacityUnits` request parameter.
-func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
- return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
-}
-
-// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
-// the `ScanIndexForward` request parameter.
-func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
- return AWSDynamoDBScanForwardKey.Bool(val)
-}
-
-// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.scanned_count" semantic conventions. It represents the value
-// of the `ScannedCount` response parameter.
-func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
- return AWSDynamoDBScannedCountKey.Int(val)
-}
-
-// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
-// "aws.dynamodb.segment" semantic conventions. It represents the value of the
-// `Segment` request parameter.
-func AWSDynamoDBSegment(val int) attribute.KeyValue {
- return AWSDynamoDBSegmentKey.Int(val)
-}
-
-// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
-// "aws.dynamodb.select" semantic conventions. It represents the value of the
-// `Select` request parameter.
-func AWSDynamoDBSelect(val string) attribute.KeyValue {
- return AWSDynamoDBSelectKey.String(val)
-}
-
-// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_count" semantic conventions. It represents the number of
-// items in the `TableNames` response parameter.
-func AWSDynamoDBTableCount(val int) attribute.KeyValue {
- return AWSDynamoDBTableCountKey.Int(val)
-}
-
-// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
-// "aws.dynamodb.table_names" semantic conventions. It represents the keys in
-// the `RequestItems` object field.
-func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
- return AWSDynamoDBTableNamesKey.StringSlice(val)
-}
-
-// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
-// "aws.dynamodb.total_segments" semantic conventions. It represents the value
-// of the `TotalSegments` request parameter.
-func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
- return AWSDynamoDBTotalSegmentsKey.Int(val)
-}
-
-// Attributes for AWS Elastic Container Service (ECS).
-const (
- // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
- // semantic conventions. It represents the ID of a running ECS task. The ID
- // MUST be extracted from `task.arn`.
- //
- // Type: string
- // RequirementLevel: ConditionallyRequired (If and only if `task.arn` is
- // populated.)
- // Stability: experimental
- // Examples: '10838bed-421f-43ef-870a-f43feacbbb5b',
- // '23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
-
- // AWSECSClusterARNKey is the attribute Key conforming to the
- // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
- // [ECS
- // cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
-
- // AWSECSContainerARNKey is the attribute Key conforming to the
- // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
- // Resource Name (ARN) of an [ECS container
- // instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9'
- AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
-
- // AWSECSLaunchtypeKey is the attribute Key conforming to the
- // "aws.ecs.launchtype" semantic conventions. It represents the [launch
- // type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html)
- // for an ECS task.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
-
- // AWSECSTaskARNKey is the attribute Key conforming to the
- // "aws.ecs.task.arn" semantic conventions. It represents the ARN of a
- // running [ECS
- // task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b',
- // 'arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd'
- AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
-
- // AWSECSTaskFamilyKey is the attribute Key conforming to the
- // "aws.ecs.task.family" semantic conventions. It represents the family
- // name of the [ECS task
- // definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
- // used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-family'
- AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
-
- // AWSECSTaskRevisionKey is the attribute Key conforming to the
- // "aws.ecs.task.revision" semantic conventions. It represents the revision
- // for the task definition used to create the ECS task.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '8', '26'
- AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
-)
-
-var (
- // ec2
- AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
- // fargate
- AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
-)
-
-// AWSECSTaskID returns an attribute KeyValue conforming to the
-// "aws.ecs.task.id" semantic conventions. It represents the ID of a running
-// ECS task. The ID MUST be extracted from `task.arn`.
-func AWSECSTaskID(val string) attribute.KeyValue {
- return AWSECSTaskIDKey.String(val)
-}
-
-// AWSECSClusterARN returns an attribute KeyValue conforming to the
-// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an [ECS
-// cluster](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html).
-func AWSECSClusterARN(val string) attribute.KeyValue {
- return AWSECSClusterARNKey.String(val)
-}
-
-// AWSECSContainerARN returns an attribute KeyValue conforming to the
-// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
-// Resource Name (ARN) of an [ECS container
-// instance](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html).
-func AWSECSContainerARN(val string) attribute.KeyValue {
- return AWSECSContainerARNKey.String(val)
-}
-
-// AWSECSTaskARN returns an attribute KeyValue conforming to the
-// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
-// [ECS
-// task](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids).
-func AWSECSTaskARN(val string) attribute.KeyValue {
- return AWSECSTaskARNKey.String(val)
-}
-
-// AWSECSTaskFamily returns an attribute KeyValue conforming to the
-// "aws.ecs.task.family" semantic conventions. It represents the family name of
-// the [ECS task
-// definition](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html)
-// used to create the ECS task.
-func AWSECSTaskFamily(val string) attribute.KeyValue {
- return AWSECSTaskFamilyKey.String(val)
-}
-
-// AWSECSTaskRevision returns an attribute KeyValue conforming to the
-// "aws.ecs.task.revision" semantic conventions. It represents the revision for
-// the task definition used to create the ECS task.
-func AWSECSTaskRevision(val string) attribute.KeyValue {
- return AWSECSTaskRevisionKey.String(val)
-}
-
-// Attributes for AWS Elastic Kubernetes Service (EKS).
-const (
- // AWSEKSClusterARNKey is the attribute Key conforming to the
- // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an
- // EKS cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster'
- AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
-)
-
-// AWSEKSClusterARN returns an attribute KeyValue conforming to the
-// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
-// cluster.
-func AWSEKSClusterARN(val string) attribute.KeyValue {
- return AWSEKSClusterARNKey.String(val)
-}
-
-// Attributes for AWS Logs.
-const (
- // AWSLogGroupARNsKey is the attribute Key conforming to the
- // "aws.log.group.arns" semantic conventions. It represents the Amazon
- // Resource Name(s) (ARN) of the AWS log group(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*'
- // Note: See the [log group ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
-
- // AWSLogGroupNamesKey is the attribute Key conforming to the
- // "aws.log.group.names" semantic conventions. It represents the name(s) of
- // the AWS log group(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/aws/lambda/my-function', 'opentelemetry-service'
- // Note: Multiple log groups must be supported for cases like
- // multi-container applications, where a single application has sidecar
- // containers, and each write to their own log group.
- AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
-
- // AWSLogStreamARNsKey is the attribute Key conforming to the
- // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of
- // the AWS log stream(s).
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- // Note: See the [log stream ARN format
- // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format).
- // One log group can contain several log streams, so these ARNs necessarily
- // identify both a log group and a log stream.
- AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
-
- // AWSLogStreamNamesKey is the attribute Key conforming to the
- // "aws.log.stream.names" semantic conventions. It represents the name(s)
- // of the AWS log stream(s) an application is writing to.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b'
- AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
-)
-
-// AWSLogGroupARNs returns an attribute KeyValue conforming to the
-// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
-// Name(s) (ARN) of the AWS log group(s).
-func AWSLogGroupARNs(val ...string) attribute.KeyValue {
- return AWSLogGroupARNsKey.StringSlice(val)
-}
-
-// AWSLogGroupNames returns an attribute KeyValue conforming to the
-// "aws.log.group.names" semantic conventions. It represents the name(s) of the
-// AWS log group(s) an application is writing to.
-func AWSLogGroupNames(val ...string) attribute.KeyValue {
- return AWSLogGroupNamesKey.StringSlice(val)
-}
-
-// AWSLogStreamARNs returns an attribute KeyValue conforming to the
-// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
-// AWS log stream(s).
-func AWSLogStreamARNs(val ...string) attribute.KeyValue {
- return AWSLogStreamARNsKey.StringSlice(val)
-}
-
-// AWSLogStreamNames returns an attribute KeyValue conforming to the
-// "aws.log.stream.names" semantic conventions. It represents the name(s) of
-// the AWS log stream(s) an application is writing to.
-func AWSLogStreamNames(val ...string) attribute.KeyValue {
- return AWSLogStreamNamesKey.StringSlice(val)
-}
-
-// Attributes for AWS Lambda.
-const (
- // AWSLambdaInvokedARNKey is the attribute Key conforming to the
- // "aws.lambda.invoked_arn" semantic conventions. It represents the full
- // invoked ARN as provided on the `Context` passed to the function
- // (`Lambda-Runtime-Invoked-Function-ARN` header on the
- // `/runtime/invocation/next` applicable).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias'
- // Note: This may be different from `cloud.resource_id` if an alias is
- // involved.
- AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
-)
-
-// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
-// "aws.lambda.invoked_arn" semantic conventions. It represents the full
-// invoked ARN as provided on the `Context` passed to the function
-// (`Lambda-Runtime-Invoked-Function-ARN` header on the
-// `/runtime/invocation/next` applicable).
-func AWSLambdaInvokedARN(val string) attribute.KeyValue {
- return AWSLambdaInvokedARNKey.String(val)
-}
-
-// Attributes for AWS S3.
-const (
- // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
- // semantic conventions. It represents the S3 bucket name the request
- // refers to. Corresponds to the `--bucket` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'some-bucket-name'
- // Note: The `bucket` attribute is applicable to all S3 operations that
- // reference a bucket, i.e. that require the bucket name as a mandatory
- // parameter.
- // This applies to almost all S3 operations except `list-buckets`.
- AWSS3BucketKey = attribute.Key("aws.s3.bucket")
-
- // AWSS3CopySourceKey is the attribute Key conforming to the
- // "aws.s3.copy_source" semantic conventions. It represents the source
- // object (in the form `bucket`/`key`) for the copy operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `copy_source` attribute applies to S3 copy operations and
- // corresponds to the `--copy-source` parameter
- // of the [copy-object operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html).
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
-
- // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
- // semantic conventions. It represents the delete request container that
- // specifies the objects to be deleted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'Objects=[{Key=string,VersionID=string},{Key=string,VersionID=string}],Quiet=boolean'
- // Note: The `delete` attribute is only applicable to the
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // operation.
- // The `delete` attribute corresponds to the `--delete` parameter of the
- // [delete-objects operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html).
- AWSS3DeleteKey = attribute.Key("aws.s3.delete")
-
- // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
- // conventions. It represents the S3 object key the request refers to.
- // Corresponds to the `--key` parameter of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // operations.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'someFile.yml'
- // Note: The `key` attribute is applicable to all object-related S3
- // operations, i.e. that require the object key as a mandatory parameter.
- // This applies in particular to the following operations:
- //
- // -
- // [copy-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html)
- // -
- // [delete-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html)
- // -
- // [get-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html)
- // -
- // [head-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html)
- // -
- // [put-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html)
- // -
- // [restore-object](https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html)
- // -
- // [select-object-content](https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html)
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [create-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3KeyKey = attribute.Key("aws.s3.key")
-
- // AWSS3PartNumberKey is the attribute Key conforming to the
- // "aws.s3.part_number" semantic conventions. It represents the part number
- // of the part being uploaded in a multipart-upload operation. This is a
- // positive integer between 1 and 10,000.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3456
- // Note: The `part_number` attribute is only applicable to the
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // and
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- // operations.
- // The `part_number` attribute corresponds to the `--part-number` parameter
- // of the
- // [upload-part operation within the S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html).
- AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
-
- // AWSS3UploadIDKey is the attribute Key conforming to the
- // "aws.s3.upload_id" semantic conventions. It represents the upload ID
- // that identifies the multipart upload.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ'
- // Note: The `upload_id` attribute applies to S3 multipart-upload
- // operations and corresponds to the `--upload-id` parameter
- // of the [S3
- // API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
- // multipart operations.
- // This applies in particular to the following operations:
- //
- // -
- // [abort-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html)
- // -
- // [complete-multipart-upload](https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html)
- // -
- // [list-parts](https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html)
- // -
- // [upload-part](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html)
- // -
- // [upload-part-copy](https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html)
- AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
-)
-
-// AWSS3Bucket returns an attribute KeyValue conforming to the
-// "aws.s3.bucket" semantic conventions. It represents the S3 bucket name the
-// request refers to. Corresponds to the `--bucket` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Bucket(val string) attribute.KeyValue {
- return AWSS3BucketKey.String(val)
-}
-
-// AWSS3CopySource returns an attribute KeyValue conforming to the
-// "aws.s3.copy_source" semantic conventions. It represents the source object
-// (in the form `bucket`/`key`) for the copy operation.
-func AWSS3CopySource(val string) attribute.KeyValue {
- return AWSS3CopySourceKey.String(val)
-}
-
-// AWSS3Delete returns an attribute KeyValue conforming to the
-// "aws.s3.delete" semantic conventions. It represents the delete request
-// container that specifies the objects to be deleted.
-func AWSS3Delete(val string) attribute.KeyValue {
- return AWSS3DeleteKey.String(val)
-}
-
-// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key"
-// semantic conventions. It represents the S3 object key the request refers to.
-// Corresponds to the `--key` parameter of the [S3
-// API](https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html)
-// operations.
-func AWSS3Key(val string) attribute.KeyValue {
- return AWSS3KeyKey.String(val)
-}
-
-// AWSS3PartNumber returns an attribute KeyValue conforming to the
-// "aws.s3.part_number" semantic conventions. It represents the part number of
-// the part being uploaded in a multipart-upload operation. This is a positive
-// integer between 1 and 10,000.
-func AWSS3PartNumber(val int) attribute.KeyValue {
- return AWSS3PartNumberKey.Int(val)
-}
-
-// AWSS3UploadID returns an attribute KeyValue conforming to the
-// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
-// identifies the multipart upload.
-func AWSS3UploadID(val string) attribute.KeyValue {
- return AWSS3UploadIDKey.String(val)
-}
-
-// The web browser attributes
-const (
- // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
- // semantic conventions. It represents the array of brand name and version
- // separated by a space
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: ' Not A;Brand 99', 'Chromium 99', 'Chrome 99'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.brands`).
- BrowserBrandsKey = attribute.Key("browser.brands")
-
- // BrowserLanguageKey is the attribute Key conforming to the
- // "browser.language" semantic conventions. It represents the preferred
- // language of the user using the browser
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'en', 'en-US', 'fr', 'fr-FR'
- // Note: This value is intended to be taken from the Navigator API
- // `navigator.language`.
- BrowserLanguageKey = attribute.Key("browser.language")
-
- // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
- // semantic conventions. It represents a boolean that is true if the
- // browser is running on a mobile device
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.mobile`). If unavailable, this attribute
- // SHOULD be left unset.
- BrowserMobileKey = attribute.Key("browser.mobile")
-
- // BrowserPlatformKey is the attribute Key conforming to the
- // "browser.platform" semantic conventions. It represents the platform on
- // which the browser is running
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Windows', 'macOS', 'Android'
- // Note: This value is intended to be taken from the [UA client hints
- // API](https://wicg.github.io/ua-client-hints/#interface)
- // (`navigator.userAgentData.platform`). If unavailable, the legacy
- // `navigator.platform` API SHOULD NOT be used instead and this attribute
- // SHOULD be left unset in order for the values to be consistent.
- // The list of possible values is defined in the [W3C User-Agent Client
- // Hints
- // specification](https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform).
- // Note that some (but not all) of these values can overlap with values in
- // the [`os.type` and `os.name` attributes](./os.md). However, for
- // consistency, the values in the `browser.platform` attribute should
- // capture the exact value that the user agent provides.
- BrowserPlatformKey = attribute.Key("browser.platform")
-)
-
-// BrowserBrands returns an attribute KeyValue conforming to the
-// "browser.brands" semantic conventions. It represents the array of brand name
-// and version separated by a space
-func BrowserBrands(val ...string) attribute.KeyValue {
- return BrowserBrandsKey.StringSlice(val)
-}
-
-// BrowserLanguage returns an attribute KeyValue conforming to the
-// "browser.language" semantic conventions. It represents the preferred
-// language of the user using the browser
-func BrowserLanguage(val string) attribute.KeyValue {
- return BrowserLanguageKey.String(val)
-}
-
-// BrowserMobile returns an attribute KeyValue conforming to the
-// "browser.mobile" semantic conventions. It represents a boolean that is true
-// if the browser is running on a mobile device
-func BrowserMobile(val bool) attribute.KeyValue {
- return BrowserMobileKey.Bool(val)
-}
-
-// BrowserPlatform returns an attribute KeyValue conforming to the
-// "browser.platform" semantic conventions. It represents the platform on which
-// the browser is running
-func BrowserPlatform(val string) attribute.KeyValue {
- return BrowserPlatformKey.String(val)
-}
-
-// These attributes may be used to describe the client in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ClientAddressKey is the attribute Key conforming to the "client.address"
- // semantic conventions. It represents the client address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'client.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.address` SHOULD represent the client address
- // behind any intermediaries, for example proxies, if it's available.
- ClientAddressKey = attribute.Key("client.address")
-
- // ClientPortKey is the attribute Key conforming to the "client.port"
- // semantic conventions. It represents the client port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- // Note: When observed from the server side, and when communicating through
- // an intermediary, `client.port` SHOULD represent the client port behind
- // any intermediaries, for example proxies, if it's available.
- ClientPortKey = attribute.Key("client.port")
-)
-
-// ClientAddress returns an attribute KeyValue conforming to the
-// "client.address" semantic conventions. It represents the client address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func ClientAddress(val string) attribute.KeyValue {
- return ClientAddressKey.String(val)
-}
-
-// ClientPort returns an attribute KeyValue conforming to the "client.port"
-// semantic conventions. It represents the client port number.
-func ClientPort(val int) attribute.KeyValue {
- return ClientPortKey.Int(val)
-}
-
-// A cloud environment (e.g. GCP, Azure, AWS).
-const (
- // CloudAccountIDKey is the attribute Key conforming to the
- // "cloud.account.id" semantic conventions. It represents the cloud account
- // ID the resource is assigned to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '111111111111', 'opentelemetry'
- CloudAccountIDKey = attribute.Key("cloud.account.id")
-
- // CloudAvailabilityZoneKey is the attribute Key conforming to the
- // "cloud.availability_zone" semantic conventions. It represents the cloud
- // regions often have multiple, isolated locations known as zones to
- // increase availability. Availability zone represents the zone where the
- // resource is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-east-1c'
- // Note: Availability zones are called "zones" on Alibaba Cloud and Google
- // Cloud.
- CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
-
- // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
- // semantic conventions. It represents the cloud platform in use.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The prefix of the service SHOULD match the one specified in
- // `cloud.provider`.
- CloudPlatformKey = attribute.Key("cloud.platform")
-
- // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
- // semantic conventions. It represents the name of the cloud provider.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- CloudProviderKey = attribute.Key("cloud.provider")
-
- // CloudRegionKey is the attribute Key conforming to the "cloud.region"
- // semantic conventions. It represents the geographical region the resource
- // is running.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-central1', 'us-east-1'
- // Note: Refer to your provider's docs to see the available regions, for
- // example [Alibaba Cloud
- // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
- // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
- // [Azure
- // regions](https://azure.microsoft.com/global-infrastructure/geographies/),
- // [Google Cloud regions](https://cloud.google.com/about/locations), or
- // [Tencent Cloud
- // regions](https://www.tencentcloud.com/document/product/213/6091).
- CloudRegionKey = attribute.Key("cloud.region")
-
- // CloudResourceIDKey is the attribute Key conforming to the
- // "cloud.resource_id" semantic conventions. It represents the cloud
- // provider-specific native identifier of the monitored cloud resource
- // (e.g. an
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
- // on AWS, a [fully qualified resource
- // ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id)
- // on Azure, a [full resource
- // name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
- // on GCP)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function',
- // '//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID',
- // '/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/'
- // Note: On some cloud providers, it may not be possible to determine the
- // full ID at startup,
- // so it may be necessary to set `cloud.resource_id` as a span attribute
- // instead.
- //
- // The exact value to use for `cloud.resource_id` depends on the cloud
- // provider.
- // The following well-known definitions MUST be used if you set this
- // attribute and they apply:
- //
- // * **AWS Lambda:** The function
- // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html).
- // Take care not to use the "invoked ARN" directly but replace any
- // [alias
- // suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html)
- // with the resolved function version, as the same runtime instance may
- // be invokable with
- // multiple different aliases.
- // * **GCP:** The [URI of the
- // resource](https://cloud.google.com/iam/docs/full-resource-names)
- // * **Azure:** The [Fully Qualified Resource
- // ID](https://docs.microsoft.com/rest/api/resources/resources/get-by-id)
- // of the invoked function,
- // *not* the function app, having the form
- // `/subscriptions//resourceGroups//providers/Microsoft.Web/sites//functions/`.
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider.
- CloudResourceIDKey = attribute.Key("cloud.resource_id")
-)
-
-var (
- // Alibaba Cloud Elastic Compute Service
- CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
- // Alibaba Cloud Function Compute
- CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc")
- // Red Hat OpenShift on Alibaba Cloud
- CloudPlatformAlibabaCloudOpenshift = CloudPlatformKey.String("alibaba_cloud_openshift")
- // AWS Elastic Compute Cloud
- CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
- // AWS Elastic Container Service
- CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
- // AWS Elastic Kubernetes Service
- CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
- // AWS Lambda
- CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
- // AWS Elastic Beanstalk
- CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
- // AWS App Runner
- CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
- // Red Hat OpenShift on AWS (ROSA)
- CloudPlatformAWSOpenshift = CloudPlatformKey.String("aws_openshift")
- // Azure Virtual Machines
- CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm")
- // Azure Container Apps
- CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure_container_apps")
- // Azure Container Instances
- CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances")
- // Azure Kubernetes Service
- CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks")
- // Azure Functions
- CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions")
- // Azure App Service
- CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service")
- // Azure Red Hat OpenShift
- CloudPlatformAzureOpenshift = CloudPlatformKey.String("azure_openshift")
- // Google Bare Metal Solution (BMS)
- CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
- // Google Cloud Compute Engine (GCE)
- CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
- // Google Cloud Run
- CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
- // Google Cloud Kubernetes Engine (GKE)
- CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
- // Google Cloud Functions (GCF)
- CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
- // Google Cloud App Engine (GAE)
- CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
- // Red Hat OpenShift on Google Cloud
- CloudPlatformGCPOpenshift = CloudPlatformKey.String("gcp_openshift")
- // Red Hat OpenShift on IBM Cloud
- CloudPlatformIbmCloudOpenshift = CloudPlatformKey.String("ibm_cloud_openshift")
- // Tencent Cloud Cloud Virtual Machine (CVM)
- CloudPlatformTencentCloudCvm = CloudPlatformKey.String("tencent_cloud_cvm")
- // Tencent Cloud Elastic Kubernetes Service (EKS)
- CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
- // Tencent Cloud Serverless Cloud Function (SCF)
- CloudPlatformTencentCloudScf = CloudPlatformKey.String("tencent_cloud_scf")
-)
-
-var (
- // Alibaba Cloud
- CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- CloudProviderAWS = CloudProviderKey.String("aws")
- // Microsoft Azure
- CloudProviderAzure = CloudProviderKey.String("azure")
- // Google Cloud Platform
- CloudProviderGCP = CloudProviderKey.String("gcp")
- // Heroku Platform as a Service
- CloudProviderHeroku = CloudProviderKey.String("heroku")
- // IBM Cloud
- CloudProviderIbmCloud = CloudProviderKey.String("ibm_cloud")
- // Tencent Cloud
- CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
-)
-
-// CloudAccountID returns an attribute KeyValue conforming to the
-// "cloud.account.id" semantic conventions. It represents the cloud account ID
-// the resource is assigned to.
-func CloudAccountID(val string) attribute.KeyValue {
- return CloudAccountIDKey.String(val)
-}
-
-// CloudAvailabilityZone returns an attribute KeyValue conforming to the
-// "cloud.availability_zone" semantic conventions. It represents the cloud
-// regions often have multiple, isolated locations known as zones to increase
-// availability. Availability zone represents the zone where the resource is
-// running.
-func CloudAvailabilityZone(val string) attribute.KeyValue {
- return CloudAvailabilityZoneKey.String(val)
-}
-
-// CloudRegion returns an attribute KeyValue conforming to the
-// "cloud.region" semantic conventions. It represents the geographical region
-// the resource is running.
-func CloudRegion(val string) attribute.KeyValue {
- return CloudRegionKey.String(val)
-}
-
-// CloudResourceID returns an attribute KeyValue conforming to the
-// "cloud.resource_id" semantic conventions. It represents the cloud
-// provider-specific native identifier of the monitored cloud resource (e.g. an
-// [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html)
-// on AWS, a [fully qualified resource
-// ID](https://learn.microsoft.com/rest/api/resources/resources/get-by-id) on
-// Azure, a [full resource
-// name](https://cloud.google.com/apis/design/resource_names#full_resource_name)
-// on GCP)
-func CloudResourceID(val string) attribute.KeyValue {
- return CloudResourceIDKey.String(val)
-}
-
-// Attributes for CloudEvents.
-const (
- // CloudeventsEventIDKey is the attribute Key conforming to the
- // "cloudevents.event_id" semantic conventions. It represents the
- // [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
- // uniquely identifies the event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '123e4567-e89b-12d3-a456-426614174000', '0001'
- CloudeventsEventIDKey = attribute.Key("cloudevents.event_id")
-
- // CloudeventsEventSourceKey is the attribute Key conforming to the
- // "cloudevents.event_source" semantic conventions. It represents the
- // [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
- // identifies the context in which an event happened.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://github.com/cloudevents',
- // '/cloudevents/spec/pull/123', 'my-service'
- CloudeventsEventSourceKey = attribute.Key("cloudevents.event_source")
-
- // CloudeventsEventSpecVersionKey is the attribute Key conforming to the
- // "cloudevents.event_spec_version" semantic conventions. It represents the
- // [version of the CloudEvents
- // specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
- // which the event uses.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.0'
- CloudeventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
-
- // CloudeventsEventSubjectKey is the attribute Key conforming to the
- // "cloudevents.event_subject" semantic conventions. It represents the
- // [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
- // of the event in the context of the event producer (identified by
- // source).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mynewfile.jpg'
- CloudeventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
-
- // CloudeventsEventTypeKey is the attribute Key conforming to the
- // "cloudevents.event_type" semantic conventions. It represents the
- // [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
- // contains a value describing the type of event related to the originating
- // occurrence.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.github.pull_request.opened',
- // 'com.example.object.deleted.v2'
- CloudeventsEventTypeKey = attribute.Key("cloudevents.event_type")
-)
-
-// CloudeventsEventID returns an attribute KeyValue conforming to the
-// "cloudevents.event_id" semantic conventions. It represents the
-// [event_id](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id)
-// uniquely identifies the event.
-func CloudeventsEventID(val string) attribute.KeyValue {
- return CloudeventsEventIDKey.String(val)
-}
-
-// CloudeventsEventSource returns an attribute KeyValue conforming to the
-// "cloudevents.event_source" semantic conventions. It represents the
-// [source](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1)
-// identifies the context in which an event happened.
-func CloudeventsEventSource(val string) attribute.KeyValue {
- return CloudeventsEventSourceKey.String(val)
-}
-
-// CloudeventsEventSpecVersion returns an attribute KeyValue conforming to
-// the "cloudevents.event_spec_version" semantic conventions. It represents the
-// [version of the CloudEvents
-// specification](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion)
-// which the event uses.
-func CloudeventsEventSpecVersion(val string) attribute.KeyValue {
- return CloudeventsEventSpecVersionKey.String(val)
-}
-
-// CloudeventsEventSubject returns an attribute KeyValue conforming to the
-// "cloudevents.event_subject" semantic conventions. It represents the
-// [subject](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject)
-// of the event in the context of the event producer (identified by source).
-func CloudeventsEventSubject(val string) attribute.KeyValue {
- return CloudeventsEventSubjectKey.String(val)
-}
-
-// CloudeventsEventType returns an attribute KeyValue conforming to the
-// "cloudevents.event_type" semantic conventions. It represents the
-// [event_type](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type)
-// contains a value describing the type of event related to the originating
-// occurrence.
-func CloudeventsEventType(val string) attribute.KeyValue {
- return CloudeventsEventTypeKey.String(val)
-}
-
-// These attributes allow to report this unit of code and therefore to provide
-// more context about the span.
-const (
- // CodeColumnKey is the attribute Key conforming to the "code.column"
- // semantic conventions. It represents the column number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 16
- CodeColumnKey = attribute.Key("code.column")
-
- // CodeFilepathKey is the attribute Key conforming to the "code.filepath"
- // semantic conventions. It represents the source code file name that
- // identifies the code unit as uniquely as possible (preferably an absolute
- // file path).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/local/MyApplication/content_root/app/index.php'
- CodeFilepathKey = attribute.Key("code.filepath")
-
- // CodeFunctionKey is the attribute Key conforming to the "code.function"
- // semantic conventions. It represents the method or function name, or
- // equivalent (usually rightmost part of the code unit's name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'serveRequest'
- CodeFunctionKey = attribute.Key("code.function")
-
- // CodeLineNumberKey is the attribute Key conforming to the "code.lineno"
- // semantic conventions. It represents the line number in `code.filepath`
- // best representing the operation. It SHOULD point within the code unit
- // named in `code.function`.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- CodeLineNumberKey = attribute.Key("code.lineno")
-
- // CodeNamespaceKey is the attribute Key conforming to the "code.namespace"
- // semantic conventions. It represents the "namespace" within which
- // `code.function` is defined. Usually the qualified class or module name,
- // such that `code.namespace` + some separator + `code.function` form a
- // unique identifier for the code unit.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com.example.MyHTTPService'
- CodeNamespaceKey = attribute.Key("code.namespace")
-
- // CodeStacktraceKey is the attribute Key conforming to the
- // "code.stacktrace" semantic conventions. It represents a stacktrace as a
- // string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'at
- // com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- CodeStacktraceKey = attribute.Key("code.stacktrace")
-)
-
-// CodeColumn returns an attribute KeyValue conforming to the "code.column"
-// semantic conventions. It represents the column number in `code.filepath`
-// best representing the operation. It SHOULD point within the code unit named
-// in `code.function`.
-func CodeColumn(val int) attribute.KeyValue {
- return CodeColumnKey.Int(val)
-}
-
-// CodeFilepath returns an attribute KeyValue conforming to the
-// "code.filepath" semantic conventions. It represents the source code file
-// name that identifies the code unit as uniquely as possible (preferably an
-// absolute file path).
-func CodeFilepath(val string) attribute.KeyValue {
- return CodeFilepathKey.String(val)
-}
-
-// CodeFunction returns an attribute KeyValue conforming to the
-// "code.function" semantic conventions. It represents the method or function
-// name, or equivalent (usually rightmost part of the code unit's name).
-func CodeFunction(val string) attribute.KeyValue {
- return CodeFunctionKey.String(val)
-}
-
-// CodeLineNumber returns an attribute KeyValue conforming to the "code.lineno"
-// semantic conventions. It represents the line number in `code.filepath` best
-// representing the operation. It SHOULD point within the code unit named in
-// `code.function`.
-func CodeLineNumber(val int) attribute.KeyValue {
- return CodeLineNumberKey.Int(val)
-}
-
-// CodeNamespace returns an attribute KeyValue conforming to the
-// "code.namespace" semantic conventions. It represents the "namespace" within
-// which `code.function` is defined. Usually the qualified class or module
-// name, such that `code.namespace` + some separator + `code.function` form a
-// unique identifier for the code unit.
-func CodeNamespace(val string) attribute.KeyValue {
- return CodeNamespaceKey.String(val)
-}
-
-// CodeStacktrace returns an attribute KeyValue conforming to the
-// "code.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func CodeStacktrace(val string) attribute.KeyValue {
- return CodeStacktraceKey.String(val)
-}
-
-// A container instance.
-const (
- // ContainerCommandKey is the attribute Key conforming to the
- // "container.command" semantic conventions. It represents the command used
- // to run the container (i.e. the command name).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol'
- // Note: If using embedded credentials or sensitive data, it is recommended
- // to remove them to prevent potential leakage.
- ContainerCommandKey = attribute.Key("container.command")
-
- // ContainerCommandArgsKey is the attribute Key conforming to the
- // "container.command_args" semantic conventions. It represents all the
- // command arguments (including the command/executable itself) run by the
- // container. [2]
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol, --config, config.yaml'
- ContainerCommandArgsKey = attribute.Key("container.command_args")
-
- // ContainerCommandLineKey is the attribute Key conforming to the
- // "container.command_line" semantic conventions. It represents the full
- // command run by the container as a single string representing the full
- // command. [2]
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcontribcol --config config.yaml'
- ContainerCommandLineKey = attribute.Key("container.command_line")
-
- // ContainerCPUStateKey is the attribute Key conforming to the
- // "container.cpu.state" semantic conventions. It represents the CPU state
- // for this data point.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'user', 'kernel'
- ContainerCPUStateKey = attribute.Key("container.cpu.state")
-
- // ContainerIDKey is the attribute Key conforming to the "container.id"
- // semantic conventions. It represents the container ID. Usually a UUID, as
- // for example used to [identify Docker
- // containers](https://docs.docker.com/engine/reference/run/#container-identification).
- // The UUID might be abbreviated.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'a3bf90e006b2'
- ContainerIDKey = attribute.Key("container.id")
-
- // ContainerImageIDKey is the attribute Key conforming to the
- // "container.image.id" semantic conventions. It represents the runtime
- // specific image identifier. Usually a hash algorithm followed by a UUID.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f'
- // Note: Docker defines a sha256 of the image id; `container.image.id`
- // corresponds to the `Image` field from the Docker container inspect
- // [API](https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect)
- // endpoint.
- // K8S defines a link to the container registry repository with digest
- // `"imageID": "registry.azurecr.io
- // /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`.
- // The ID is assigned by the container runtime and can vary in different
- // environments. Consider using `oci.manifest.digest` if it is important to
- // identify the same image in different environments/runtimes.
- ContainerImageIDKey = attribute.Key("container.image.id")
-
- // ContainerImageNameKey is the attribute Key conforming to the
- // "container.image.name" semantic conventions. It represents the name of
- // the image the container was built on.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gcr.io/opentelemetry/operator'
- ContainerImageNameKey = attribute.Key("container.image.name")
-
- // ContainerImageRepoDigestsKey is the attribute Key conforming to the
- // "container.image.repo_digests" semantic conventions. It represents the
- // repo digests of the container image as provided by the container
- // runtime.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb',
- // 'internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578'
- // Note:
- // [Docker](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect)
- // and
- // [CRI](https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238)
- // report those under the `RepoDigests` field.
- ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
-
- // ContainerImageTagsKey is the attribute Key conforming to the
- // "container.image.tags" semantic conventions. It represents the container
- // image tags. An example can be found in [Docker Image
- // Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
- // Should be only the `` section of the full name for example from
- // `registry.example.com/my-org/my-image:`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'v1.27.1', '3.5.7-0'
- ContainerImageTagsKey = attribute.Key("container.image.tags")
-
- // ContainerNameKey is the attribute Key conforming to the "container.name"
- // semantic conventions. It represents the container name used by container
- // runtime.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-autoconf'
- ContainerNameKey = attribute.Key("container.name")
-
- // ContainerRuntimeKey is the attribute Key conforming to the
- // "container.runtime" semantic conventions. It represents the container
- // runtime managing this container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'docker', 'containerd', 'rkt'
- ContainerRuntimeKey = attribute.Key("container.runtime")
-)
-
-var (
- // When tasks of the cgroup are in user mode (Linux). When all container processes are in user mode (Windows)
- ContainerCPUStateUser = ContainerCPUStateKey.String("user")
- // When CPU is used by the system (host OS)
- ContainerCPUStateSystem = ContainerCPUStateKey.String("system")
- // When tasks of the cgroup are in kernel mode (Linux). When all container processes are in kernel mode (Windows)
- ContainerCPUStateKernel = ContainerCPUStateKey.String("kernel")
-)
-
-// ContainerCommand returns an attribute KeyValue conforming to the
-// "container.command" semantic conventions. It represents the command used to
-// run the container (i.e. the command name).
-func ContainerCommand(val string) attribute.KeyValue {
- return ContainerCommandKey.String(val)
-}
-
-// ContainerCommandArgs returns an attribute KeyValue conforming to the
-// "container.command_args" semantic conventions. It represents the all the
-// command arguments (including the command/executable itself) run by the
-// container. [2]
-func ContainerCommandArgs(val ...string) attribute.KeyValue {
- return ContainerCommandArgsKey.StringSlice(val)
-}
-
-// ContainerCommandLine returns an attribute KeyValue conforming to the
-// "container.command_line" semantic conventions. It represents the full
-// command run by the container as a single string representing the full
-// command. [2]
-func ContainerCommandLine(val string) attribute.KeyValue {
- return ContainerCommandLineKey.String(val)
-}
-
-// ContainerID returns an attribute KeyValue conforming to the
-// "container.id" semantic conventions. It represents the container ID. Usually
-// a UUID, as for example used to [identify Docker
-// containers](https://docs.docker.com/engine/reference/run/#container-identification).
-// The UUID might be abbreviated.
-func ContainerID(val string) attribute.KeyValue {
- return ContainerIDKey.String(val)
-}
-
-// ContainerImageID returns an attribute KeyValue conforming to the
-// "container.image.id" semantic conventions. It represents the runtime
-// specific image identifier. Usually a hash algorithm followed by a UUID.
-func ContainerImageID(val string) attribute.KeyValue {
- return ContainerImageIDKey.String(val)
-}
-
-// ContainerImageName returns an attribute KeyValue conforming to the
-// "container.image.name" semantic conventions. It represents the name of the
-// image the container was built on.
-func ContainerImageName(val string) attribute.KeyValue {
- return ContainerImageNameKey.String(val)
-}
-
-// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
-// "container.image.repo_digests" semantic conventions. It represents the repo
-// digests of the container image as provided by the container runtime.
-func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
- return ContainerImageRepoDigestsKey.StringSlice(val)
-}
-
-// ContainerImageTags returns an attribute KeyValue conforming to the
-// "container.image.tags" semantic conventions. It represents the container
-// image tags. An example can be found in [Docker Image
-// Inspect](https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect).
-// Should be only the `` section of the full name for example from
-// `registry.example.com/my-org/my-image:`.
-func ContainerImageTags(val ...string) attribute.KeyValue {
- return ContainerImageTagsKey.StringSlice(val)
-}
-
-// ContainerName returns an attribute KeyValue conforming to the
-// "container.name" semantic conventions. It represents the container name used
-// by container runtime.
-func ContainerName(val string) attribute.KeyValue {
- return ContainerNameKey.String(val)
-}
-
-// ContainerRuntime returns an attribute KeyValue conforming to the
-// "container.runtime" semantic conventions. It represents the container
-// runtime managing this container.
-func ContainerRuntime(val string) attribute.KeyValue {
- return ContainerRuntimeKey.String(val)
-}
-
-// This group defines the attributes used to describe telemetry in the context
-// of databases.
-const (
- // DBClientConnectionsPoolNameKey is the attribute Key conforming to the
- // "db.client.connections.pool.name" semantic conventions. It represents
- // the name of the connection pool; unique within the instrumented
- // application. In case the connection pool implementation doesn't provide
- // a name, instrumentation should use a combination of `server.address` and
- // `server.port` attributes formatted as `server.address:server.port`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myDataSource'
- DBClientConnectionsPoolNameKey = attribute.Key("db.client.connections.pool.name")
-
- // DBClientConnectionsStateKey is the attribute Key conforming to the
- // "db.client.connections.state" semantic conventions. It represents the
- // state of a connection in the pool
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle'
- DBClientConnectionsStateKey = attribute.Key("db.client.connections.state")
-
- // DBCollectionNameKey is the attribute Key conforming to the
- // "db.collection.name" semantic conventions. It represents the name of a
- // collection (table, container) within the database.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'public.users', 'customers'
- // Note: If the collection name is parsed from the query, it SHOULD match
- // the value provided in the query and may be qualified with the schema and
- // database name.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBCollectionNameKey = attribute.Key("db.collection.name")
-
- // DBNamespaceKey is the attribute Key conforming to the "db.namespace"
- // semantic conventions. It represents the name of the database, fully
- // qualified within the server address and port.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'customers', 'test.users'
- // Note: If a database system has multiple namespace components, they
- // SHOULD be concatenated (potentially using database system specific
- // conventions) from most general to most specific namespace component, and
- // more specific namespaces SHOULD NOT be captured without the more general
- // namespaces, to ensure that "startswith" queries for the more general
- // namespaces will be valid.
- // Semantic conventions for individual database systems SHOULD document
- // what `db.namespace` means in the context of that system.
- // It is RECOMMENDED to capture the value as provided by the application
- // without attempting to do any case normalization.
- DBNamespaceKey = attribute.Key("db.namespace")
-
- // DBOperationNameKey is the attribute Key conforming to the
- // "db.operation.name" semantic conventions. It represents the name of the
- // operation or command being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findAndModify', 'HMSET', 'SELECT'
- // Note: It is RECOMMENDED to capture the value as provided by the
- // application without attempting to do any case normalization.
- DBOperationNameKey = attribute.Key("db.operation.name")
-
- // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
- // semantic conventions. It represents the database query being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'SELECT * FROM wuser_table where username = ?', 'SET mykey
- // "WuValue"'
- DBQueryTextKey = attribute.Key("db.query.text")
-
- // DBSystemKey is the attribute Key conforming to the "db.system" semantic
- // conventions. It represents the database management system (DBMS) product
- // as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual DBMS may differ from the one identified by the client.
- // For example, when using PostgreSQL client libraries to connect to a
- // CockroachDB, the `db.system` is set to `postgresql` based on the
- // instrumentation's best knowledge.
- DBSystemKey = attribute.Key("db.system")
-)
-
-var (
- // idle
- DBClientConnectionsStateIdle = DBClientConnectionsStateKey.String("idle")
- // used
- DBClientConnectionsStateUsed = DBClientConnectionsStateKey.String("used")
-)
-
-var (
- // Some other SQL database. Fallback only. See notes
- DBSystemOtherSQL = DBSystemKey.String("other_sql")
- // Microsoft SQL Server
- DBSystemMSSQL = DBSystemKey.String("mssql")
- // Microsoft SQL Server Compact
- DBSystemMssqlcompact = DBSystemKey.String("mssqlcompact")
- // MySQL
- DBSystemMySQL = DBSystemKey.String("mysql")
- // Oracle Database
- DBSystemOracle = DBSystemKey.String("oracle")
- // IBM DB2
- DBSystemDB2 = DBSystemKey.String("db2")
- // PostgreSQL
- DBSystemPostgreSQL = DBSystemKey.String("postgresql")
- // Amazon Redshift
- DBSystemRedshift = DBSystemKey.String("redshift")
- // Apache Hive
- DBSystemHive = DBSystemKey.String("hive")
- // Cloudscape
- DBSystemCloudscape = DBSystemKey.String("cloudscape")
- // HyperSQL DataBase
- DBSystemHSQLDB = DBSystemKey.String("hsqldb")
- // Progress Database
- DBSystemProgress = DBSystemKey.String("progress")
- // SAP MaxDB
- DBSystemMaxDB = DBSystemKey.String("maxdb")
- // SAP HANA
- DBSystemHanaDB = DBSystemKey.String("hanadb")
- // Ingres
- DBSystemIngres = DBSystemKey.String("ingres")
- // FirstSQL
- DBSystemFirstSQL = DBSystemKey.String("firstsql")
- // EnterpriseDB
- DBSystemEDB = DBSystemKey.String("edb")
- // InterSystems Caché
- DBSystemCache = DBSystemKey.String("cache")
- // Adabas (Adaptable Database System)
- DBSystemAdabas = DBSystemKey.String("adabas")
- // Firebird
- DBSystemFirebird = DBSystemKey.String("firebird")
- // Apache Derby
- DBSystemDerby = DBSystemKey.String("derby")
- // FileMaker
- DBSystemFilemaker = DBSystemKey.String("filemaker")
- // Informix
- DBSystemInformix = DBSystemKey.String("informix")
- // InstantDB
- DBSystemInstantDB = DBSystemKey.String("instantdb")
- // InterBase
- DBSystemInterbase = DBSystemKey.String("interbase")
- // MariaDB
- DBSystemMariaDB = DBSystemKey.String("mariadb")
- // Netezza
- DBSystemNetezza = DBSystemKey.String("netezza")
- // Pervasive PSQL
- DBSystemPervasive = DBSystemKey.String("pervasive")
- // PointBase
- DBSystemPointbase = DBSystemKey.String("pointbase")
- // SQLite
- DBSystemSqlite = DBSystemKey.String("sqlite")
- // Sybase
- DBSystemSybase = DBSystemKey.String("sybase")
- // Teradata
- DBSystemTeradata = DBSystemKey.String("teradata")
- // Vertica
- DBSystemVertica = DBSystemKey.String("vertica")
- // H2
- DBSystemH2 = DBSystemKey.String("h2")
- // ColdFusion IMQ
- DBSystemColdfusion = DBSystemKey.String("coldfusion")
- // Apache Cassandra
- DBSystemCassandra = DBSystemKey.String("cassandra")
- // Apache HBase
- DBSystemHBase = DBSystemKey.String("hbase")
- // MongoDB
- DBSystemMongoDB = DBSystemKey.String("mongodb")
- // Redis
- DBSystemRedis = DBSystemKey.String("redis")
- // Couchbase
- DBSystemCouchbase = DBSystemKey.String("couchbase")
- // CouchDB
- DBSystemCouchDB = DBSystemKey.String("couchdb")
- // Microsoft Azure Cosmos DB
- DBSystemCosmosDB = DBSystemKey.String("cosmosdb")
- // Amazon DynamoDB
- DBSystemDynamoDB = DBSystemKey.String("dynamodb")
- // Neo4j
- DBSystemNeo4j = DBSystemKey.String("neo4j")
- // Apache Geode
- DBSystemGeode = DBSystemKey.String("geode")
- // Elasticsearch
- DBSystemElasticsearch = DBSystemKey.String("elasticsearch")
- // Memcached
- DBSystemMemcached = DBSystemKey.String("memcached")
- // CockroachDB
- DBSystemCockroachdb = DBSystemKey.String("cockroachdb")
- // OpenSearch
- DBSystemOpensearch = DBSystemKey.String("opensearch")
- // ClickHouse
- DBSystemClickhouse = DBSystemKey.String("clickhouse")
- // Cloud Spanner
- DBSystemSpanner = DBSystemKey.String("spanner")
- // Trino
- DBSystemTrino = DBSystemKey.String("trino")
-)
-
-// DBClientConnectionsPoolName returns an attribute KeyValue conforming to
-// the "db.client.connections.pool.name" semantic conventions. It represents
-// the name of the connection pool; unique within the instrumented application.
-// In case the connection pool implementation doesn't provide a name,
-// instrumentation should use a combination of `server.address` and
-// `server.port` attributes formatted as `server.address:server.port`.
-func DBClientConnectionsPoolName(val string) attribute.KeyValue {
- return DBClientConnectionsPoolNameKey.String(val)
-}
-
-// DBCollectionName returns an attribute KeyValue conforming to the
-// "db.collection.name" semantic conventions. It represents the name of a
-// collection (table, container) within the database.
-func DBCollectionName(val string) attribute.KeyValue {
- return DBCollectionNameKey.String(val)
-}
-
-// DBNamespace returns an attribute KeyValue conforming to the
-// "db.namespace" semantic conventions. It represents the name of the database,
-// fully qualified within the server address and port.
-func DBNamespace(val string) attribute.KeyValue {
- return DBNamespaceKey.String(val)
-}
-
-// DBOperationName returns an attribute KeyValue conforming to the
-// "db.operation.name" semantic conventions. It represents the name of the
-// operation or command being executed.
-func DBOperationName(val string) attribute.KeyValue {
- return DBOperationNameKey.String(val)
-}
-
-// DBQueryText returns an attribute KeyValue conforming to the
-// "db.query.text" semantic conventions. It represents the database query being
-// executed.
-func DBQueryText(val string) attribute.KeyValue {
- return DBQueryTextKey.String(val)
-}
-
-// This group defines attributes for Cassandra.
-const (
- // DBCassandraConsistencyLevelKey is the attribute Key conforming to the
- // "db.cassandra.consistency_level" semantic conventions. It represents the
- // consistency level of the query. Based on consistency values from
- // [CQL](https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level")
-
- // DBCassandraCoordinatorDCKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.dc" semantic conventions. It represents the
- // data center of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'us-west-2'
- DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
-
- // DBCassandraCoordinatorIDKey is the attribute Key conforming to the
- // "db.cassandra.coordinator.id" semantic conventions. It represents the ID
- // of the coordinating node for a query.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af'
- DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id")
-
- // DBCassandraIdempotenceKey is the attribute Key conforming to the
-	// "db.cassandra.idempotence" semantic conventions. It represents
-	// whether or not the query is idempotent.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence")
-
- // DBCassandraPageSizeKey is the attribute Key conforming to the
- // "db.cassandra.page_size" semantic conventions. It represents the fetch
- // size used for paging, i.e. how many rows will be returned at once.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 5000
- DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size")
-
- // DBCassandraSpeculativeExecutionCountKey is the attribute Key conforming
- // to the "db.cassandra.speculative_execution_count" semantic conventions.
- // It represents the number of times a query was speculatively executed.
- // Not set or `0` if the query was not executed speculatively.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 2
- DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count")
-)
-
-var (
- // all
- DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
- // each_quorum
- DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
- // quorum
- DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
- // local_quorum
- DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
- // one
- DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
- // two
- DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
- // three
- DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
- // local_one
- DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
- // any
- DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
- // serial
- DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
- // local_serial
- DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
-)
-
-// DBCassandraCoordinatorDC returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.dc" semantic conventions. It represents the data
-// center of the coordinating node for a query.
-func DBCassandraCoordinatorDC(val string) attribute.KeyValue {
- return DBCassandraCoordinatorDCKey.String(val)
-}
-
-// DBCassandraCoordinatorID returns an attribute KeyValue conforming to the
-// "db.cassandra.coordinator.id" semantic conventions. It represents the ID of
-// the coordinating node for a query.
-func DBCassandraCoordinatorID(val string) attribute.KeyValue {
- return DBCassandraCoordinatorIDKey.String(val)
-}
-
-// DBCassandraIdempotence returns an attribute KeyValue conforming to the
-// "db.cassandra.idempotence" semantic conventions. It represents the whether
-// or not the query is idempotent.
-func DBCassandraIdempotence(val bool) attribute.KeyValue {
- return DBCassandraIdempotenceKey.Bool(val)
-}
-
-// DBCassandraPageSize returns an attribute KeyValue conforming to the
-// "db.cassandra.page_size" semantic conventions. It represents the fetch size
-// used for paging, i.e. how many rows will be returned at once.
-func DBCassandraPageSize(val int) attribute.KeyValue {
- return DBCassandraPageSizeKey.Int(val)
-}
-
-// DBCassandraSpeculativeExecutionCount returns an attribute KeyValue
-// conforming to the "db.cassandra.speculative_execution_count" semantic
-// conventions. It represents the number of times a query was speculatively
-// executed. Not set or `0` if the query was not executed speculatively.
-func DBCassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
- return DBCassandraSpeculativeExecutionCountKey.Int(val)
-}
-
-// This group defines attributes for Azure Cosmos DB.
-const (
- // DBCosmosDBClientIDKey is the attribute Key conforming to the
- // "db.cosmosdb.client_id" semantic conventions. It represents the unique
- // Cosmos client instance id.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '3ba4827d-4422-483f-b59f-85b74211c11d'
- DBCosmosDBClientIDKey = attribute.Key("db.cosmosdb.client_id")
-
- // DBCosmosDBConnectionModeKey is the attribute Key conforming to the
- // "db.cosmosdb.connection_mode" semantic conventions. It represents the
- // cosmos client connection mode.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBConnectionModeKey = attribute.Key("db.cosmosdb.connection_mode")
-
- // DBCosmosDBOperationTypeKey is the attribute Key conforming to the
- // "db.cosmosdb.operation_type" semantic conventions. It represents the
- // cosmosDB Operation Type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBOperationTypeKey = attribute.Key("db.cosmosdb.operation_type")
-
- // DBCosmosDBRequestChargeKey is the attribute Key conforming to the
-	// "db.cosmosdb.request_charge" semantic conventions. It represents the RU
-	// consumed for that operation.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 46.18, 1.0
- DBCosmosDBRequestChargeKey = attribute.Key("db.cosmosdb.request_charge")
-
- // DBCosmosDBRequestContentLengthKey is the attribute Key conforming to the
- // "db.cosmosdb.request_content_length" semantic conventions. It represents
- // the request payload size in bytes
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- DBCosmosDBRequestContentLengthKey = attribute.Key("db.cosmosdb.request_content_length")
-
- // DBCosmosDBStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.status_code" semantic conventions. It represents the cosmos
- // DB status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 200, 201
- DBCosmosDBStatusCodeKey = attribute.Key("db.cosmosdb.status_code")
-
- // DBCosmosDBSubStatusCodeKey is the attribute Key conforming to the
- // "db.cosmosdb.sub_status_code" semantic conventions. It represents the
- // cosmos DB sub status code.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000, 1002
- DBCosmosDBSubStatusCodeKey = attribute.Key("db.cosmosdb.sub_status_code")
-)
-
-var (
- // Gateway (HTTP) connections mode
- DBCosmosDBConnectionModeGateway = DBCosmosDBConnectionModeKey.String("gateway")
- // Direct connection
- DBCosmosDBConnectionModeDirect = DBCosmosDBConnectionModeKey.String("direct")
-)
-
-var (
- // invalid
- DBCosmosDBOperationTypeInvalid = DBCosmosDBOperationTypeKey.String("Invalid")
- // create
- DBCosmosDBOperationTypeCreate = DBCosmosDBOperationTypeKey.String("Create")
- // patch
- DBCosmosDBOperationTypePatch = DBCosmosDBOperationTypeKey.String("Patch")
- // read
- DBCosmosDBOperationTypeRead = DBCosmosDBOperationTypeKey.String("Read")
- // read_feed
- DBCosmosDBOperationTypeReadFeed = DBCosmosDBOperationTypeKey.String("ReadFeed")
- // delete
- DBCosmosDBOperationTypeDelete = DBCosmosDBOperationTypeKey.String("Delete")
- // replace
- DBCosmosDBOperationTypeReplace = DBCosmosDBOperationTypeKey.String("Replace")
- // execute
- DBCosmosDBOperationTypeExecute = DBCosmosDBOperationTypeKey.String("Execute")
- // query
- DBCosmosDBOperationTypeQuery = DBCosmosDBOperationTypeKey.String("Query")
- // head
- DBCosmosDBOperationTypeHead = DBCosmosDBOperationTypeKey.String("Head")
- // head_feed
- DBCosmosDBOperationTypeHeadFeed = DBCosmosDBOperationTypeKey.String("HeadFeed")
- // upsert
- DBCosmosDBOperationTypeUpsert = DBCosmosDBOperationTypeKey.String("Upsert")
- // batch
- DBCosmosDBOperationTypeBatch = DBCosmosDBOperationTypeKey.String("Batch")
- // query_plan
- DBCosmosDBOperationTypeQueryPlan = DBCosmosDBOperationTypeKey.String("QueryPlan")
- // execute_javascript
- DBCosmosDBOperationTypeExecuteJavascript = DBCosmosDBOperationTypeKey.String("ExecuteJavaScript")
-)
-
-// DBCosmosDBClientID returns an attribute KeyValue conforming to the
-// "db.cosmosdb.client_id" semantic conventions. It represents the unique
-// Cosmos client instance id.
-func DBCosmosDBClientID(val string) attribute.KeyValue {
- return DBCosmosDBClientIDKey.String(val)
-}
-
-// DBCosmosDBRequestCharge returns an attribute KeyValue conforming to the
-// "db.cosmosdb.request_charge" semantic conventions. It represents the rU
-// consumed for that operation
-func DBCosmosDBRequestCharge(val float64) attribute.KeyValue {
- return DBCosmosDBRequestChargeKey.Float64(val)
-}
-
-// DBCosmosDBRequestContentLength returns an attribute KeyValue conforming
-// to the "db.cosmosdb.request_content_length" semantic conventions. It
-// represents the request payload size in bytes
-func DBCosmosDBRequestContentLength(val int) attribute.KeyValue {
- return DBCosmosDBRequestContentLengthKey.Int(val)
-}
-
-// DBCosmosDBStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.status_code" semantic conventions. It represents the cosmos DB
-// status code.
-func DBCosmosDBStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBStatusCodeKey.Int(val)
-}
-
-// DBCosmosDBSubStatusCode returns an attribute KeyValue conforming to the
-// "db.cosmosdb.sub_status_code" semantic conventions. It represents the cosmos
-// DB sub status code.
-func DBCosmosDBSubStatusCode(val int) attribute.KeyValue {
- return DBCosmosDBSubStatusCodeKey.Int(val)
-}
-
-// This group defines attributes for Elasticsearch.
-const (
- // DBElasticsearchClusterNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.cluster.name" semantic conventions. It represents
-	// the identifier of an Elasticsearch cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e9106fc68e3044f0b1475b04bf4ffd5f'
- DBElasticsearchClusterNameKey = attribute.Key("db.elasticsearch.cluster.name")
-
- // DBElasticsearchNodeNameKey is the attribute Key conforming to the
-	// "db.elasticsearch.node.name" semantic conventions. It represents the
-	// human-readable identifier of the node/instance to which a request was
-	// routed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-0000000001'
- DBElasticsearchNodeNameKey = attribute.Key("db.elasticsearch.node.name")
-)
-
-// DBElasticsearchClusterName returns an attribute KeyValue conforming to
-// the "db.elasticsearch.cluster.name" semantic conventions. It represents the
-// represents the identifier of an Elasticsearch cluster.
-func DBElasticsearchClusterName(val string) attribute.KeyValue {
- return DBElasticsearchClusterNameKey.String(val)
-}
-
-// DBElasticsearchNodeName returns an attribute KeyValue conforming to the
-// "db.elasticsearch.node.name" semantic conventions. It represents the
-// represents the human-readable identifier of the node/instance to which a
-// request was routed.
-func DBElasticsearchNodeName(val string) attribute.KeyValue {
- return DBElasticsearchNodeNameKey.String(val)
-}
-
-// Attributes for software deployments.
-const (
- // DeploymentEnvironmentKey is the attribute Key conforming to the
- // "deployment.environment" semantic conventions. It represents the name of
- // the [deployment
- // environment](https://wikipedia.org/wiki/Deployment_environment) (aka
- // deployment tier).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'staging', 'production'
- // Note: `deployment.environment` does not affect the uniqueness
- // constraints defined through
- // the `service.namespace`, `service.name` and `service.instance.id`
- // resource attributes.
- // This implies that resources carrying the following attribute
- // combinations MUST be
- // considered to be identifying the same service:
- //
- // * `service.name=frontend`, `deployment.environment=production`
- // * `service.name=frontend`, `deployment.environment=staging`.
- DeploymentEnvironmentKey = attribute.Key("deployment.environment")
-)
-
-// DeploymentEnvironment returns an attribute KeyValue conforming to the
-// "deployment.environment" semantic conventions. It represents the name of the
-// [deployment environment](https://wikipedia.org/wiki/Deployment_environment)
-// (aka deployment tier).
-func DeploymentEnvironment(val string) attribute.KeyValue {
- return DeploymentEnvironmentKey.String(val)
-}
-
-// Attributes that represents an occurrence of a lifecycle transition on the
-// Android platform.
-const (
- // AndroidStateKey is the attribute Key conforming to the "android.state"
-	// semantic conventions. This attribute is deprecated; use the
-	// `device.app.lifecycle` event definition including `android.state` as a
-	// payload field instead.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The Android lifecycle states are defined in [Activity lifecycle
- // callbacks](https://developer.android.com/guide/components/activities/activity-lifecycle#lc),
- // and from which the `OS identifiers` are derived.
- AndroidStateKey = attribute.Key("android.state")
-)
-
-var (
- // Any time before Activity.onResume() or, if the app has no Activity, Context.startService() has been called in the app for the first time
- AndroidStateCreated = AndroidStateKey.String("created")
- // Any time after Activity.onPause() or, if the app has no Activity, Context.stopService() has been called when the app was in the foreground state
- AndroidStateBackground = AndroidStateKey.String("background")
- // Any time after Activity.onResume() or, if the app has no Activity, Context.startService() has been called when the app was in either the created or background states
- AndroidStateForeground = AndroidStateKey.String("foreground")
-)
-
-// These attributes may be used to describe the receiver of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // DestinationAddressKey is the attribute Key conforming to the
- // "destination.address" semantic conventions. It represents the
- // destination address - domain name if available without reverse DNS
- // lookup; otherwise, IP address or Unix domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'destination.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the source side, and when communicating through
- // an intermediary, `destination.address` SHOULD represent the destination
- // address behind any intermediaries, for example proxies, if it's
- // available.
- DestinationAddressKey = attribute.Key("destination.address")
-
- // DestinationPortKey is the attribute Key conforming to the
- // "destination.port" semantic conventions. It represents the destination
- // port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- DestinationPortKey = attribute.Key("destination.port")
-)
-
-// DestinationAddress returns an attribute KeyValue conforming to the
-// "destination.address" semantic conventions. It represents the destination
-// address - domain name if available without reverse DNS lookup; otherwise, IP
-// address or Unix domain socket name.
-func DestinationAddress(val string) attribute.KeyValue {
- return DestinationAddressKey.String(val)
-}
-
-// DestinationPort returns an attribute KeyValue conforming to the
-// "destination.port" semantic conventions. It represents the destination port
-// number
-func DestinationPort(val int) attribute.KeyValue {
- return DestinationPortKey.Int(val)
-}
-
-// Describes device attributes.
-const (
- // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
- // conventions. It represents a unique identifier representing the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092'
- // Note: The device identifier MUST only be defined using the values
- // outlined below. This value is not an advertising identifier and MUST NOT
- // be used as such. On iOS (Swift or Objective-C), this value MUST be equal
- // to the [vendor
- // identifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-identifierforvendor).
- // On Android (Java or Kotlin), this value MUST be equal to the Firebase
- // Installation ID or a globally unique UUID which is persisted across
- // sessions in your application. More information can be found
- // [here](https://developer.android.com/training/articles/user-data-ids) on
- // best practices and exact implementation details. Caution should be taken
- // when storing personal data or anything which can identify a user. GDPR
- // and data protection laws may apply, ensure you do your own due
- // diligence.
- DeviceIDKey = attribute.Key("device.id")
-
- // DeviceManufacturerKey is the attribute Key conforming to the
- // "device.manufacturer" semantic conventions. It represents the name of
- // the device manufacturer
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Apple', 'Samsung'
- // Note: The Android OS provides this field via
- // [Build](https://developer.android.com/reference/android/os/Build#MANUFACTURER).
- // iOS apps SHOULD hardcode the value `Apple`.
- DeviceManufacturerKey = attribute.Key("device.manufacturer")
-
- // DeviceModelIdentifierKey is the attribute Key conforming to the
- // "device.model.identifier" semantic conventions. It represents the model
- // identifier for the device
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone3,4', 'SM-G920F'
- // Note: It's recommended this value represents a machine-readable version
- // of the model identifier rather than the market or consumer-friendly name
- // of the device.
- DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
-
- // DeviceModelNameKey is the attribute Key conforming to the
- // "device.model.name" semantic conventions. It represents the marketing
- // name for the device model
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6'
- // Note: It's recommended this value represents a human-readable version of
- // the device model rather than a machine-readable alternative.
- DeviceModelNameKey = attribute.Key("device.model.name")
-)
-
-// DeviceID returns an attribute KeyValue conforming to the "device.id"
-// semantic conventions. It represents a unique identifier representing the
-// device
-func DeviceID(val string) attribute.KeyValue {
- return DeviceIDKey.String(val)
-}
-
-// DeviceManufacturer returns an attribute KeyValue conforming to the
-// "device.manufacturer" semantic conventions. It represents the name of the
-// device manufacturer
-func DeviceManufacturer(val string) attribute.KeyValue {
- return DeviceManufacturerKey.String(val)
-}
-
-// DeviceModelIdentifier returns an attribute KeyValue conforming to the
-// "device.model.identifier" semantic conventions. It represents the model
-// identifier for the device
-func DeviceModelIdentifier(val string) attribute.KeyValue {
- return DeviceModelIdentifierKey.String(val)
-}
-
-// DeviceModelName returns an attribute KeyValue conforming to the
-// "device.model.name" semantic conventions. It represents the marketing name
-// for the device model
-func DeviceModelName(val string) attribute.KeyValue {
- return DeviceModelNameKey.String(val)
-}
-
-// These attributes may be used for any disk related operation.
-const (
- // DiskIoDirectionKey is the attribute Key conforming to the
- // "disk.io.direction" semantic conventions. It represents the disk IO
- // operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read'
- DiskIoDirectionKey = attribute.Key("disk.io.direction")
-)
-
-var (
- // read
- DiskIoDirectionRead = DiskIoDirectionKey.String("read")
- // write
- DiskIoDirectionWrite = DiskIoDirectionKey.String("write")
-)
-
-// The shared attributes used to report a DNS query.
-const (
- // DNSQuestionNameKey is the attribute Key conforming to the
- // "dns.question.name" semantic conventions. It represents the name being
- // queried.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.example.com', 'opentelemetry.io'
- // Note: If the name field contains non-printable characters (below 32 or
- // above 126), those characters should be represented as escaped base 10
- // integers (\DDD). Back slashes and quotes should be escaped. Tabs,
- // carriage returns, and line feeds should be converted to \t, \r, and \n
- // respectively.
- DNSQuestionNameKey = attribute.Key("dns.question.name")
-)
-
-// DNSQuestionName returns an attribute KeyValue conforming to the
-// "dns.question.name" semantic conventions. It represents the name being
-// queried.
-func DNSQuestionName(val string) attribute.KeyValue {
- return DNSQuestionNameKey.String(val)
-}
-
-// Attributes for operations with an authenticated and/or authorized enduser.
-const (
- // EnduserIDKey is the attribute Key conforming to the "enduser.id"
- // semantic conventions. It represents the username or client_id extracted
- // from the access token or
- // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header
- // in the inbound request from outside the system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'username'
- EnduserIDKey = attribute.Key("enduser.id")
-
- // EnduserRoleKey is the attribute Key conforming to the "enduser.role"
- // semantic conventions. It represents the actual/assumed role the client
- // is making the request under extracted from token or application security
- // context.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'admin'
- EnduserRoleKey = attribute.Key("enduser.role")
-
- // EnduserScopeKey is the attribute Key conforming to the "enduser.scope"
- // semantic conventions. It represents the scopes or granted authorities
- // the client currently possesses extracted from token or application
- // security context. The value would come from the scope associated with an
- // [OAuth 2.0 Access
- // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
- // value in a [SAML 2.0
- // Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'read:message, write:files'
- EnduserScopeKey = attribute.Key("enduser.scope")
-)
-
-// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
-// semantic conventions. It represents the username or client_id extracted from
-// the access token or
-// [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in
-// the inbound request from outside the system.
-func EnduserID(val string) attribute.KeyValue {
- return EnduserIDKey.String(val)
-}
-
-// EnduserRole returns an attribute KeyValue conforming to the
-// "enduser.role" semantic conventions. It represents the actual/assumed role
-// the client is making the request under extracted from token or application
-// security context.
-func EnduserRole(val string) attribute.KeyValue {
- return EnduserRoleKey.String(val)
-}
-
-// EnduserScope returns an attribute KeyValue conforming to the
-// "enduser.scope" semantic conventions. It represents the scopes or granted
-// authorities the client currently possesses extracted from token or
-// application security context. The value would come from the scope associated
-// with an [OAuth 2.0 Access
-// Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute
-// value in a [SAML 2.0
-// Assertion](http://docs.oasis-open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html).
-func EnduserScope(val string) attribute.KeyValue {
- return EnduserScopeKey.String(val)
-}
-
-// The shared attributes used to report an error.
-const (
- // ErrorTypeKey is the attribute Key conforming to the "error.type"
-	// semantic conventions. It describes a class of error the operation
-	// ended with.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'timeout', 'java.net.UnknownHostException',
- // 'server_certificate_invalid', '500'
- // Note: The `error.type` SHOULD be predictable, and SHOULD have low
- // cardinality.
- //
- // When `error.type` is set to a type (e.g., an exception type), its
- // canonical class name identifying the type within the artifact SHOULD be
- // used.
- //
- // Instrumentations SHOULD document the list of errors they report.
- //
- // The cardinality of `error.type` within one instrumentation library
- // SHOULD be low.
- // Telemetry consumers that aggregate data from multiple instrumentation
- // libraries and applications
- // should be prepared for `error.type` to have high cardinality at query
- // time when no
- // additional filters are applied.
- //
- // If the operation has completed successfully, instrumentations SHOULD NOT
- // set `error.type`.
- //
- // If a specific domain defines its own set of error identifiers (such as
- // HTTP or gRPC status codes),
- // it's RECOMMENDED to:
- //
- // * Use a domain-specific attribute
- // * Set `error.type` to capture all errors, regardless of whether they are
- // defined within the domain-specific set or not.
- ErrorTypeKey = attribute.Key("error.type")
-)
-
-var (
- // A fallback error value to be used when the instrumentation doesn't define a custom value
- ErrorTypeOther = ErrorTypeKey.String("_OTHER")
-)
-
-// Attributes for Events represented using Log Records.
-const (
- // EventNameKey is the attribute Key conforming to the "event.name"
-	// semantic conventions. It identifies the class / type of event.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'browser.mouse.click', 'device.app.lifecycle'
- // Note: Event names are subject to the same rules as [attribute
- // names](https://github.com/open-telemetry/opentelemetry-specification/tree/v1.33.0/specification/common/attribute-naming.md).
- // Notably, event names are namespaced to avoid collisions and provide a
- // clean separation of semantics for events in separate domains like
- // browser, mobile, and kubernetes.
- EventNameKey = attribute.Key("event.name")
-)
-
-// EventName returns an attribute KeyValue conforming to the "event.name"
-// semantic conventions. It identifies the class / type of event.
-func EventName(val string) attribute.KeyValue {
- return EventNameKey.String(val)
-}
-
-// The shared attributes used to report a single exception associated with a
-// span or log.
-const (
- // ExceptionEscapedKey is the attribute Key conforming to the
-	// "exception.escaped" semantic conventions. It SHOULD be set to true if
-	// the exception event is recorded at a point where it is known that the
-	// exception is escaping the scope of the span.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- // Note: An exception is considered to have escaped (or left) the scope of
- // a span,
- // if that span is ended while the exception is still logically "in
- // flight".
- // This may be actually "in flight" in some languages (e.g. if the
- // exception
- // is passed to a Context manager's `__exit__` method in Python) but will
- // usually be caught at the point of recording the exception in most
- // languages.
- //
- // It is usually not possible to determine at the point where an exception
- // is thrown
- // whether it will escape the scope of a span.
- // However, it is trivial to know that an exception
- // will escape, if one checks for an active exception just before ending
- // the span,
- // as done in the [example for recording span
- // exceptions](https://opentelemetry.io/docs/specs/semconv/exceptions/exceptions-spans/#recording-an-exception).
- //
- // It follows that an exception may still escape the scope of the span
- // even if the `exception.escaped` attribute was not set or set to false,
- // since the event might have been recorded at a time where it was not
- // clear whether the exception will escape.
- ExceptionEscapedKey = attribute.Key("exception.escaped")
-
- // ExceptionMessageKey is the attribute Key conforming to the
- // "exception.message" semantic conventions. It represents the exception
- // message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Division by zero', "Can't convert 'int' object to str
- // implicitly"
- ExceptionMessageKey = attribute.Key("exception.message")
-
- // ExceptionStacktraceKey is the attribute Key conforming to the
- // "exception.stacktrace" semantic conventions. It represents a stacktrace
- // as a string in the natural representation for the language runtime. The
- // representation is to be determined and documented by each language SIG.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test
- // exception\\n at '
- // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at '
- // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at '
- // 'com.example.GenerateTrace.main(GenerateTrace.java:5)'
- ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
-
- // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
- // semantic conventions. It represents the type of the exception (its
- // fully-qualified class name, if applicable). The dynamic type of the
- // exception should be preferred over the static type in languages that
- // support it.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'java.net.ConnectException', 'OSError'
- ExceptionTypeKey = attribute.Key("exception.type")
-)
-
-// ExceptionEscaped returns an attribute KeyValue conforming to the
-// "exception.escaped" semantic conventions. It represents the sHOULD be set to
-// true if the exception event is recorded at a point where it is known that
-// the exception is escaping the scope of the span.
-func ExceptionEscaped(val bool) attribute.KeyValue {
- return ExceptionEscapedKey.Bool(val)
-}
-
-// ExceptionMessage returns an attribute KeyValue conforming to the
-// "exception.message" semantic conventions. It represents the exception
-// message.
-func ExceptionMessage(val string) attribute.KeyValue {
- return ExceptionMessageKey.String(val)
-}
-
-// ExceptionStacktrace returns an attribute KeyValue conforming to the
-// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
-// string in the natural representation for the language runtime. The
-// representation is to be determined and documented by each language SIG.
-func ExceptionStacktrace(val string) attribute.KeyValue {
- return ExceptionStacktraceKey.String(val)
-}
-
-// ExceptionType returns an attribute KeyValue conforming to the
-// "exception.type" semantic conventions. It represents the type of the
-// exception (its fully-qualified class name, if applicable). The dynamic type
-// of the exception should be preferred over the static type in languages that
-// support it.
-func ExceptionType(val string) attribute.KeyValue {
- return ExceptionTypeKey.String(val)
-}
-
-// FaaS attributes
-const (
- // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
- // semantic conventions. It represents a boolean that is true if the
- // serverless function is executed for the first time (aka cold-start).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSColdstartKey = attribute.Key("faas.coldstart")
-
- // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
- // conventions. It represents a string containing the schedule period as
- // [Cron
- // Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0/5 * * * ? *'
- FaaSCronKey = attribute.Key("faas.cron")
-
- // FaaSDocumentCollectionKey is the attribute Key conforming to the
- // "faas.document.collection" semantic conventions. It represents the name
- // of the source on which the triggering operation was performed. For
- // example, in Cloud Storage or S3 corresponds to the bucket name, and in
- // Cosmos DB to the database name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myBucketName', 'myDBName'
- FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
-
- // FaaSDocumentNameKey is the attribute Key conforming to the
- // "faas.document.name" semantic conventions. It represents the document
- // name/table subjected to the operation. For example, in Cloud Storage or
- // S3 is the name of the file, and in Cosmos DB the table name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myFile.txt', 'myTableName'
- FaaSDocumentNameKey = attribute.Key("faas.document.name")
-
- // FaaSDocumentOperationKey is the attribute Key conforming to the
-	// "faas.document.operation" semantic conventions. It describes the type
-	// of the operation that was performed on the data.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
-
- // FaaSDocumentTimeKey is the attribute Key conforming to the
- // "faas.document.time" semantic conventions. It represents a string
- // containing the time when the data was accessed in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSDocumentTimeKey = attribute.Key("faas.document.time")
-
- // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
- // semantic conventions. It represents the execution environment ID as a
- // string, that will be potentially reused for other invocations to the
- // same function/function version.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de'
- // Note: * **AWS Lambda:** Use the (full) log stream name.
- FaaSInstanceKey = attribute.Key("faas.instance")
-
- // FaaSInvocationIDKey is the attribute Key conforming to the
- // "faas.invocation_id" semantic conventions. It represents the invocation
- // ID of the current function invocation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28'
- FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
-
- // FaaSInvokedNameKey is the attribute Key conforming to the
- // "faas.invoked_name" semantic conventions. It represents the name of the
- // invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function'
- // Note: SHOULD be equal to the `faas.name` resource attribute of the
- // invoked function.
- FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
-
- // FaaSInvokedProviderKey is the attribute Key conforming to the
- // "faas.invoked_provider" semantic conventions. It represents the cloud
- // provider of the invoked function.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
- // invoked function.
- FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
-
- // FaaSInvokedRegionKey is the attribute Key conforming to the
- // "faas.invoked_region" semantic conventions. It represents the cloud
- // region of the invoked function.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'eu-central-1'
- // Note: SHOULD be equal to the `cloud.region` resource attribute of the
- // invoked function.
- FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
-
- // FaaSMaxMemoryKey is the attribute Key conforming to the
- // "faas.max_memory" semantic conventions. It represents the amount of
- // memory available to the serverless function converted to Bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 134217728
- // Note: It's recommended to set this attribute since e.g. too little
- // memory can easily stop a Java AWS Lambda function from working
- // correctly. On AWS Lambda, the environment variable
- // `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this information (which must
- // be multiplied by 1,048,576).
- FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
-
- // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
- // conventions. It represents the name of the single function that this
- // runtime instance executes.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-function', 'myazurefunctionapp/some-function-name'
- // Note: This is the name of the function as configured/deployed on the
- // FaaS
- // platform and is usually different from the name of the callback
- // function (which may be stored in the
- // [`code.namespace`/`code.function`](/docs/general/attributes.md#source-code-attributes)
- // span attributes).
- //
- // For some cloud providers, the above definition is ambiguous. The
- // following
- // definition of function name MUST be used for this attribute
- // (and consequently the span name) for the listed cloud
- // providers/products:
- //
- // * **Azure:** The full name `/`, i.e., function app name
- // followed by a forward slash followed by the function name (this form
- // can also be seen in the resource JSON for the function).
- // This means that a span attribute MUST be used, as an Azure function
- // app can host multiple functions that would usually share
- // a TracerProvider (see also the `cloud.resource_id` attribute).
- FaaSNameKey = attribute.Key("faas.name")
-
- // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
- // conventions. It represents a string containing the function invocation
- // time in the [ISO
- // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
- // expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2020-01-23T13:47:06Z'
- FaaSTimeKey = attribute.Key("faas.time")
-
- // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger"
- // semantic conventions. It represents the type of the trigger which caused
- // this function invocation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- FaaSTriggerKey = attribute.Key("faas.trigger")
-
- // FaaSVersionKey is the attribute Key conforming to the "faas.version"
- // semantic conventions. It represents the immutable version of the
- // function being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '26', 'pinkfroid-00002'
- // Note: Depending on the cloud provider and platform, use:
- //
- // * **AWS Lambda:** The [function
- // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html)
- // (an integer represented as a decimal string).
- // * **Google Cloud Run (Services):** The
- // [revision](https://cloud.google.com/run/docs/managing/revisions)
- // (i.e., the function name plus the revision suffix).
- // * **Google Cloud Functions:** The value of the
- // [`K_REVISION` environment
- // variable](https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically).
- // * **Azure Functions:** Not applicable. Do not set this attribute.
- FaaSVersionKey = attribute.Key("faas.version")
-)
-
-var (
- // When a new object is created
- FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
- // When an object is modified
- FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
- // When an object is deleted
- FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
-)
-
-var (
- // Alibaba Cloud
- FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
- // Amazon Web Services
- FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
- // Microsoft Azure
- FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
- // Google Cloud Platform
- FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
- // Tencent Cloud
- FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
-)
-
-var (
- // A response to some data source operation such as a database or filesystem read/write
- FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
- // To provide an answer to an inbound HTTP request
- FaaSTriggerHTTP = FaaSTriggerKey.String("http")
- // A function is set to be executed when messages are sent to a messaging system
- FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub")
- // A function is scheduled to be executed regularly
- FaaSTriggerTimer = FaaSTriggerKey.String("timer")
- // If none of the others apply
- FaaSTriggerOther = FaaSTriggerKey.String("other")
-)
-
-// FaaSColdstart returns an attribute KeyValue conforming to the
-// "faas.coldstart" semantic conventions. It represents a boolean that is true
-// if the serverless function is executed for the first time (aka cold-start).
-func FaaSColdstart(val bool) attribute.KeyValue {
- return FaaSColdstartKey.Bool(val)
-}
-
-// FaaSCron returns an attribute KeyValue conforming to the "faas.cron"
-// semantic conventions. It represents a string containing the schedule period
-// as [Cron
-// Expression](https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm).
-func FaaSCron(val string) attribute.KeyValue {
- return FaaSCronKey.String(val)
-}
-
-// FaaSDocumentCollection returns an attribute KeyValue conforming to the
-// "faas.document.collection" semantic conventions. It represents the name of
-// the source on which the triggering operation was performed. For example, in
-// Cloud Storage or S3 this corresponds to the bucket name, and in Cosmos DB to
-// the database name.
-func FaaSDocumentCollection(val string) attribute.KeyValue {
- return FaaSDocumentCollectionKey.String(val)
-}
-
-// FaaSDocumentName returns an attribute KeyValue conforming to the
-// "faas.document.name" semantic conventions. It represents the document
-// name/table subjected to the operation. For example, in Cloud Storage or S3
-// this is the name of the file, and in Cosmos DB the table name.
-func FaaSDocumentName(val string) attribute.KeyValue {
- return FaaSDocumentNameKey.String(val)
-}
-
-// FaaSDocumentTime returns an attribute KeyValue conforming to the
-// "faas.document.time" semantic conventions. It represents a string containing
-// the time when the data was accessed in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSDocumentTime(val string) attribute.KeyValue {
- return FaaSDocumentTimeKey.String(val)
-}
-
-// FaaSInstance returns an attribute KeyValue conforming to the
-// "faas.instance" semantic conventions. It represents the execution
-// environment ID as a string, that will be potentially reused for other
-// invocations to the same function/function version.
-func FaaSInstance(val string) attribute.KeyValue {
- return FaaSInstanceKey.String(val)
-}
-
-// FaaSInvocationID returns an attribute KeyValue conforming to the
-// "faas.invocation_id" semantic conventions. It represents the invocation ID
-// of the current function invocation.
-func FaaSInvocationID(val string) attribute.KeyValue {
- return FaaSInvocationIDKey.String(val)
-}
-
-// FaaSInvokedName returns an attribute KeyValue conforming to the
-// "faas.invoked_name" semantic conventions. It represents the name of the
-// invoked function.
-func FaaSInvokedName(val string) attribute.KeyValue {
- return FaaSInvokedNameKey.String(val)
-}
-
-// FaaSInvokedRegion returns an attribute KeyValue conforming to the
-// "faas.invoked_region" semantic conventions. It represents the cloud region
-// of the invoked function.
-func FaaSInvokedRegion(val string) attribute.KeyValue {
- return FaaSInvokedRegionKey.String(val)
-}
-
-// FaaSMaxMemory returns an attribute KeyValue conforming to the
-// "faas.max_memory" semantic conventions. It represents the amount of memory
-// available to the serverless function converted to Bytes.
-func FaaSMaxMemory(val int) attribute.KeyValue {
- return FaaSMaxMemoryKey.Int(val)
-}
-
-// FaaSName returns an attribute KeyValue conforming to the "faas.name"
-// semantic conventions. It represents the name of the single function that
-// this runtime instance executes.
-func FaaSName(val string) attribute.KeyValue {
- return FaaSNameKey.String(val)
-}
-
-// FaaSTime returns an attribute KeyValue conforming to the "faas.time"
-// semantic conventions. It represents a string containing the function
-// invocation time in the [ISO
-// 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format
-// expressed in [UTC](https://www.w3.org/TR/NOTE-datetime).
-func FaaSTime(val string) attribute.KeyValue {
- return FaaSTimeKey.String(val)
-}
-
-// FaaSVersion returns an attribute KeyValue conforming to the
-// "faas.version" semantic conventions. It represents the immutable version of
-// the function being executed.
-func FaaSVersion(val string) attribute.KeyValue {
- return FaaSVersionKey.String(val)
-}
-
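// A minimal usage sketch for the FaaS attributes defined above: setting them
// on a span through the OpenTelemetry trace API. The tracer name, span name,
// and attribute values are illustrative only; it assumes imports of "context"
// and "go.opentelemetry.io/otel", with this package aliased as semconv.
func recordFaaSInvocation(ctx context.Context) {
	_, span := otel.Tracer("faas-example").Start(ctx, "invoke")
	defer span.End()
	span.SetAttributes(
		semconv.FaaSInvokedName("my-function"),
		semconv.FaaSInvokedRegion("eu-central-1"),
		semconv.FaaSInvokedProviderAWS,
		semconv.FaaSTriggerHTTP,
		semconv.FaaSColdstart(true),
	)
}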
-// Attributes for Feature Flags.
-const (
- // FeatureFlagKeyKey is the attribute Key conforming to the
- // "feature_flag.key" semantic conventions. It represents the unique
- // identifier of the feature flag.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'logo-color'
- FeatureFlagKeyKey = attribute.Key("feature_flag.key")
-
- // FeatureFlagProviderNameKey is the attribute Key conforming to the
- // "feature_flag.provider_name" semantic conventions. It represents the
- // name of the service provider that performs the flag evaluation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Flag Manager'
- FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider_name")
-
- // FeatureFlagVariantKey is the attribute Key conforming to the
-	// "feature_flag.variant" semantic conventions. It represents the variant of
-	// the feature flag, which SHOULD be a semantic identifier for the value. If
-	// one is unavailable, a stringified version of the value can be used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'red', 'true', 'on'
- // Note: A semantic identifier, commonly referred to as a variant, provides
- // a means
- // for referring to a value without including the value itself. This can
- // provide additional context for understanding the meaning behind a value.
-	// For example, the variant `red` may be used for the value `#c05543`.
- //
- // A stringified version of the value can be used in situations where a
- // semantic identifier is unavailable. String representation of the value
- // should be determined by the implementer.
- FeatureFlagVariantKey = attribute.Key("feature_flag.variant")
-)
-
-// FeatureFlagKey returns an attribute KeyValue conforming to the
-// "feature_flag.key" semantic conventions. It represents the unique identifier
-// of the feature flag.
-func FeatureFlagKey(val string) attribute.KeyValue {
- return FeatureFlagKeyKey.String(val)
-}
-
-// FeatureFlagProviderName returns an attribute KeyValue conforming to the
-// "feature_flag.provider_name" semantic conventions. It represents the name of
-// the service provider that performs the flag evaluation.
-func FeatureFlagProviderName(val string) attribute.KeyValue {
- return FeatureFlagProviderNameKey.String(val)
-}
-
-// FeatureFlagVariant returns an attribute KeyValue conforming to the
-// "feature_flag.variant" semantic conventions. It represents the sHOULD be a
-// semantic identifier for a value. If one is unavailable, a stringified
-// version of the value can be used.
-func FeatureFlagVariant(val string) attribute.KeyValue {
- return FeatureFlagVariantKey.String(val)
-}
-
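// A sketch of how the feature flag attributes above can be recorded: the
// OpenTelemetry specification models flag evaluations as a span event named
// "feature_flag" carrying these attributes. Flag name, provider, and variant
// are illustrative; it assumes "go.opentelemetry.io/otel/trace" is imported
// and this package is aliased as semconv.
func recordFlagEvaluation(span trace.Span) {
	span.AddEvent("feature_flag", trace.WithAttributes(
		semconv.FeatureFlagKey("logo-color"),
		semconv.FeatureFlagProviderName("Flag Manager"),
		semconv.FeatureFlagVariant("red"),
	))
}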
-// Describes file attributes.
-const (
- // FileDirectoryKey is the attribute Key conforming to the "file.directory"
- // semantic conventions. It represents the directory where the file is
- // located. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/user', 'C:\\Program Files\\MyApp'
- FileDirectoryKey = attribute.Key("file.directory")
-
- // FileExtensionKey is the attribute Key conforming to the "file.extension"
- // semantic conventions. It represents the file extension, excluding the
- // leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: When the file name has multiple extensions (example.tar.gz), only
- // the last one should be captured ("gz", not "tar.gz").
- FileExtensionKey = attribute.Key("file.extension")
-
- // FileNameKey is the attribute Key conforming to the "file.name" semantic
- // conventions. It represents the name of the file including the extension,
- // without the directory.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.png'
- FileNameKey = attribute.Key("file.name")
-
- // FilePathKey is the attribute Key conforming to the "file.path" semantic
- // conventions. It represents the full path to the file, including the file
- // name. It should include the drive letter, when appropriate.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/home/alice/example.png', 'C:\\Program
- // Files\\MyApp\\myapp.exe'
- FilePathKey = attribute.Key("file.path")
-
- // FileSizeKey is the attribute Key conforming to the "file.size" semantic
- // conventions. It represents the file size in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- FileSizeKey = attribute.Key("file.size")
-)
-
-// FileDirectory returns an attribute KeyValue conforming to the
-// "file.directory" semantic conventions. It represents the directory where the
-// file is located. It should include the drive letter, when appropriate.
-func FileDirectory(val string) attribute.KeyValue {
- return FileDirectoryKey.String(val)
-}
-
-// FileExtension returns an attribute KeyValue conforming to the
-// "file.extension" semantic conventions. It represents the file extension,
-// excluding the leading dot.
-func FileExtension(val string) attribute.KeyValue {
- return FileExtensionKey.String(val)
-}
-
-// FileName returns an attribute KeyValue conforming to the "file.name"
-// semantic conventions. It represents the name of the file including the
-// extension, without the directory.
-func FileName(val string) attribute.KeyValue {
- return FileNameKey.String(val)
-}
-
-// FilePath returns an attribute KeyValue conforming to the "file.path"
-// semantic conventions. It represents the full path to the file, including the
-// file name. It should include the drive letter, when appropriate.
-func FilePath(val string) attribute.KeyValue {
- return FilePathKey.String(val)
-}
-
-// FileSize returns an attribute KeyValue conforming to the "file.size"
-// semantic conventions. It represents the file size in bytes.
-func FileSize(val int) attribute.KeyValue {
- return FileSizeKey.Int(val)
-}
-
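// A small sketch building a reusable set of file attributes from the keys
// above, for example to annotate a span or log record describing a file
// operation. Paths and sizes are illustrative; it assumes
// "go.opentelemetry.io/otel/attribute" is imported and this package is
// aliased as semconv.
func fileAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.FileDirectory("/home/alice"),
		semconv.FileName("example.png"),
		semconv.FileExtension("png"),
		semconv.FilePath("/home/alice/example.png"),
		semconv.FileSize(77835),
	}
}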
-// Attributes for Google Cloud Run.
-const (
- // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.execution" semantic conventions. It represents the
- // name of the Cloud Run
- // [execution](https://cloud.google.com/run/docs/managing/job-executions)
- // being run for the Job, as set by the
- // [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'job-name-xxxx', 'sample-job-mdw84'
- GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
-
- // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
- // "gcp.cloud_run.job.task_index" semantic conventions. It represents the
- // index for a task within an execution as provided by the
- // [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
- // environment variable.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1
- GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
-)
-
-// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.execution" semantic conventions. It represents the name
-// of the Cloud Run
-// [execution](https://cloud.google.com/run/docs/managing/job-executions) being
-// run for the Job, as set by the
-// [`CLOUD_RUN_EXECUTION`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobExecution(val string) attribute.KeyValue {
- return GCPCloudRunJobExecutionKey.String(val)
-}
-
-// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
-// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
-// for a task within an execution as provided by the
-// [`CLOUD_RUN_TASK_INDEX`](https://cloud.google.com/run/docs/container-contract#jobs-env-vars)
-// environment variable.
-func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
- return GCPCloudRunJobTaskIndexKey.Int(val)
-}
-
-// Attributes for Google Compute Engine (GCE).
-const (
- // GCPGceInstanceHostnameKey is the attribute Key conforming to the
- // "gcp.gce.instance.hostname" semantic conventions. It represents the
- // hostname of a GCE instance. This is the full value of the default or
- // [custom
- // hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-host1234.example.com',
- // 'sample-vm.us-west1-b.c.my-project.internal'
- GCPGceInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
-
- // GCPGceInstanceNameKey is the attribute Key conforming to the
- // "gcp.gce.instance.name" semantic conventions. It represents the instance
- // name of a GCE instance. This is the value provided by `host.name`, the
- // visible name of the instance in the Cloud Console UI, and the prefix for
- // the default hostname of the instance as defined by the [default internal
- // DNS
- // name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'instance-1', 'my-vm-name'
- GCPGceInstanceNameKey = attribute.Key("gcp.gce.instance.name")
-)
-
-// GCPGceInstanceHostname returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
-// of a GCE instance. This is the full value of the default or [custom
-// hostname](https://cloud.google.com/compute/docs/instances/custom-hostname-vm).
-func GCPGceInstanceHostname(val string) attribute.KeyValue {
- return GCPGceInstanceHostnameKey.String(val)
-}
-
-// GCPGceInstanceName returns an attribute KeyValue conforming to the
-// "gcp.gce.instance.name" semantic conventions. It represents the instance
-// name of a GCE instance. This is the value provided by `host.name`, the
-// visible name of the instance in the Cloud Console UI, and the prefix for the
-// default hostname of the instance as defined by the [default internal DNS
-// name](https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names).
-func GCPGceInstanceName(val string) attribute.KeyValue {
- return GCPGceInstanceNameKey.String(val)
-}
-
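// A sketch describing a GCP workload as an OpenTelemetry resource using the
// Cloud Run and GCE attributes above. The concrete values are illustrative; it
// assumes "go.opentelemetry.io/otel/sdk/resource" is imported, this package is
// aliased as semconv, and SchemaURL is the schema constant of this package.
func gcpResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.GCPCloudRunJobExecution("sample-job-mdw84"),
		semconv.GCPCloudRunJobTaskIndex(0),
		semconv.GCPGceInstanceName("instance-1"),
		semconv.GCPGceInstanceHostname("my-host1234.example.com"),
	)
}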
-// The attributes used to describe telemetry in the context of LLM (Large
-// Language Models) requests and responses.
-const (
- // GenAiCompletionKey is the attribute Key conforming to the
- // "gen_ai.completion" semantic conventions. It represents the full
- // response received from the LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'assistant', 'content': 'The capital of France is
- // Paris.'}]"
-	// Note: It's RECOMMENDED to format completions as a JSON string matching
- // [OpenAI messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiCompletionKey = attribute.Key("gen_ai.completion")
-
- // GenAiPromptKey is the attribute Key conforming to the "gen_ai.prompt"
- // semantic conventions. It represents the full prompt sent to an LLM.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: "[{'role': 'user', 'content': 'What is the capital of
- // France?'}]"
-	// Note: It's RECOMMENDED to format prompts as a JSON string matching [OpenAI
- // messages
- // format](https://platform.openai.com/docs/guides/text-generation)
- GenAiPromptKey = attribute.Key("gen_ai.prompt")
-
- // GenAiRequestMaxTokensKey is the attribute Key conforming to the
- // "gen_ai.request.max_tokens" semantic conventions. It represents the
- // maximum number of tokens the LLM generates for a request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
-
- // GenAiRequestModelKey is the attribute Key conforming to the
- // "gen_ai.request.model" semantic conventions. It represents the name of
- // the LLM a request is being made to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4'
- GenAiRequestModelKey = attribute.Key("gen_ai.request.model")
-
- // GenAiRequestTemperatureKey is the attribute Key conforming to the
- // "gen_ai.request.temperature" semantic conventions. It represents the
- // temperature setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0.0
- GenAiRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
-
- // GenAiRequestTopPKey is the attribute Key conforming to the
- // "gen_ai.request.top_p" semantic conventions. It represents the top_p
- // sampling setting for the LLM request.
- //
- // Type: double
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1.0
- GenAiRequestTopPKey = attribute.Key("gen_ai.request.top_p")
-
- // GenAiResponseFinishReasonsKey is the attribute Key conforming to the
- // "gen_ai.response.finish_reasons" semantic conventions. It represents the
- // array of reasons the model stopped generating tokens, corresponding to
- // each generation received.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'stop'
- GenAiResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
-
- // GenAiResponseIDKey is the attribute Key conforming to the
- // "gen_ai.response.id" semantic conventions. It represents the unique
- // identifier for the completion.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'chatcmpl-123'
- GenAiResponseIDKey = attribute.Key("gen_ai.response.id")
-
- // GenAiResponseModelKey is the attribute Key conforming to the
- // "gen_ai.response.model" semantic conventions. It represents the name of
- // the LLM a response was generated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'gpt-4-0613'
- GenAiResponseModelKey = attribute.Key("gen_ai.response.model")
-
- // GenAiSystemKey is the attribute Key conforming to the "gen_ai.system"
- // semantic conventions. It represents the Generative AI product as
- // identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'openai'
- // Note: The actual GenAI product may differ from the one identified by the
- // client. For example, when using OpenAI client libraries to communicate
- // with Mistral, the `gen_ai.system` is set to `openai` based on the
- // instrumentation's best knowledge.
- GenAiSystemKey = attribute.Key("gen_ai.system")
-
- // GenAiUsageCompletionTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.completion_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM response (completion).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 180
- GenAiUsageCompletionTokensKey = attribute.Key("gen_ai.usage.completion_tokens")
-
- // GenAiUsagePromptTokensKey is the attribute Key conforming to the
- // "gen_ai.usage.prompt_tokens" semantic conventions. It represents the
- // number of tokens used in the LLM prompt.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 100
- GenAiUsagePromptTokensKey = attribute.Key("gen_ai.usage.prompt_tokens")
-)
-
-var (
- // OpenAI
- GenAiSystemOpenai = GenAiSystemKey.String("openai")
-)
-
-// GenAiCompletion returns an attribute KeyValue conforming to the
-// "gen_ai.completion" semantic conventions. It represents the full response
-// received from the LLM.
-func GenAiCompletion(val string) attribute.KeyValue {
- return GenAiCompletionKey.String(val)
-}
-
-// GenAiPrompt returns an attribute KeyValue conforming to the
-// "gen_ai.prompt" semantic conventions. It represents the full prompt sent to
-// an LLM.
-func GenAiPrompt(val string) attribute.KeyValue {
- return GenAiPromptKey.String(val)
-}
-
-// GenAiRequestMaxTokens returns an attribute KeyValue conforming to the
-// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
-// number of tokens the LLM generates for a request.
-func GenAiRequestMaxTokens(val int) attribute.KeyValue {
- return GenAiRequestMaxTokensKey.Int(val)
-}
-
-// GenAiRequestModel returns an attribute KeyValue conforming to the
-// "gen_ai.request.model" semantic conventions. It represents the name of the
-// LLM a request is being made to.
-func GenAiRequestModel(val string) attribute.KeyValue {
- return GenAiRequestModelKey.String(val)
-}
-
-// GenAiRequestTemperature returns an attribute KeyValue conforming to the
-// "gen_ai.request.temperature" semantic conventions. It represents the
-// temperature setting for the LLM request.
-func GenAiRequestTemperature(val float64) attribute.KeyValue {
- return GenAiRequestTemperatureKey.Float64(val)
-}
-
-// GenAiRequestTopP returns an attribute KeyValue conforming to the
-// "gen_ai.request.top_p" semantic conventions. It represents the top_p
-// sampling setting for the LLM request.
-func GenAiRequestTopP(val float64) attribute.KeyValue {
- return GenAiRequestTopPKey.Float64(val)
-}
-
-// GenAiResponseFinishReasons returns an attribute KeyValue conforming to
-// the "gen_ai.response.finish_reasons" semantic conventions. It represents the
-// array of reasons the model stopped generating tokens, corresponding to each
-// generation received.
-func GenAiResponseFinishReasons(val ...string) attribute.KeyValue {
- return GenAiResponseFinishReasonsKey.StringSlice(val)
-}
-
-// GenAiResponseID returns an attribute KeyValue conforming to the
-// "gen_ai.response.id" semantic conventions. It represents the unique
-// identifier for the completion.
-func GenAiResponseID(val string) attribute.KeyValue {
- return GenAiResponseIDKey.String(val)
-}
-
-// GenAiResponseModel returns an attribute KeyValue conforming to the
-// "gen_ai.response.model" semantic conventions. It represents the name of the
-// LLM a response was generated from.
-func GenAiResponseModel(val string) attribute.KeyValue {
- return GenAiResponseModelKey.String(val)
-}
-
-// GenAiUsageCompletionTokens returns an attribute KeyValue conforming to
-// the "gen_ai.usage.completion_tokens" semantic conventions. It represents the
-// number of tokens used in the LLM response (completion).
-func GenAiUsageCompletionTokens(val int) attribute.KeyValue {
- return GenAiUsageCompletionTokensKey.Int(val)
-}
-
-// GenAiUsagePromptTokens returns an attribute KeyValue conforming to the
-// "gen_ai.usage.prompt_tokens" semantic conventions. It represents the number
-// of tokens used in the LLM prompt.
-func GenAiUsagePromptTokens(val int) attribute.KeyValue {
- return GenAiUsagePromptTokensKey.Int(val)
-}
-
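// A sketch of instrumenting an LLM call with the gen_ai attributes above:
// request parameters are set when the span starts, and response details once
// the completion arrives. Model names, token counts, and the tracer name are
// illustrative; it assumes imports of "context" and "go.opentelemetry.io/otel",
// with this package aliased as semconv.
func recordLLMCall(ctx context.Context) {
	_, span := otel.Tracer("genai-example").Start(ctx, "chat gpt-4")
	defer span.End()
	span.SetAttributes(
		semconv.GenAiSystemOpenai,
		semconv.GenAiRequestModel("gpt-4"),
		semconv.GenAiRequestMaxTokens(100),
		semconv.GenAiRequestTemperature(0.0),
	)
	// ... perform the request, then record what came back:
	span.SetAttributes(
		semconv.GenAiResponseID("chatcmpl-123"),
		semconv.GenAiResponseModel("gpt-4-0613"),
		semconv.GenAiResponseFinishReasons("stop"),
		semconv.GenAiUsagePromptTokens(100),
		semconv.GenAiUsageCompletionTokens(180),
	)
}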
-// Attributes for GraphQL.
-const (
- // GraphqlDocumentKey is the attribute Key conforming to the
- // "graphql.document" semantic conventions. It represents the GraphQL
- // document being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query findBookByID { bookByID(id: ?) { name } }'
- // Note: The value may be sanitized to exclude sensitive information.
- GraphqlDocumentKey = attribute.Key("graphql.document")
-
- // GraphqlOperationNameKey is the attribute Key conforming to the
- // "graphql.operation.name" semantic conventions. It represents the name of
- // the operation being executed.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'findBookByID'
- GraphqlOperationNameKey = attribute.Key("graphql.operation.name")
-
- // GraphqlOperationTypeKey is the attribute Key conforming to the
- // "graphql.operation.type" semantic conventions. It represents the type of
- // the operation being executed.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'query', 'mutation', 'subscription'
- GraphqlOperationTypeKey = attribute.Key("graphql.operation.type")
-)
-
-var (
- // GraphQL query
- GraphqlOperationTypeQuery = GraphqlOperationTypeKey.String("query")
- // GraphQL mutation
- GraphqlOperationTypeMutation = GraphqlOperationTypeKey.String("mutation")
- // GraphQL subscription
- GraphqlOperationTypeSubscription = GraphqlOperationTypeKey.String("subscription")
-)
-
-// GraphqlDocument returns an attribute KeyValue conforming to the
-// "graphql.document" semantic conventions. It represents the GraphQL document
-// being executed.
-func GraphqlDocument(val string) attribute.KeyValue {
- return GraphqlDocumentKey.String(val)
-}
-
-// GraphqlOperationName returns an attribute KeyValue conforming to the
-// "graphql.operation.name" semantic conventions. It represents the name of the
-// operation being executed.
-func GraphqlOperationName(val string) attribute.KeyValue {
- return GraphqlOperationNameKey.String(val)
-}
-
-// Attributes for the Heroku platform on which the application is running.
-const (
- // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
- // semantic conventions. It represents the unique identifier for the
- // application
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2daa2797-e42b-4624-9322-ec3f968df4da'
- HerokuAppIDKey = attribute.Key("heroku.app.id")
-
- // HerokuReleaseCommitKey is the attribute Key conforming to the
- // "heroku.release.commit" semantic conventions. It represents the commit
- // hash for the current release
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'e6134959463efd8966b20e75b913cafe3f5ec'
- HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
-
- // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
- // "heroku.release.creation_timestamp" semantic conventions. It represents
- // the time and date the release was created
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2022-10-23T18:00:42Z'
- HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
-)
-
-// HerokuAppID returns an attribute KeyValue conforming to the
-// "heroku.app.id" semantic conventions. It represents the unique identifier
-// for the application
-func HerokuAppID(val string) attribute.KeyValue {
- return HerokuAppIDKey.String(val)
-}
-
-// HerokuReleaseCommit returns an attribute KeyValue conforming to the
-// "heroku.release.commit" semantic conventions. It represents the commit hash
-// for the current release
-func HerokuReleaseCommit(val string) attribute.KeyValue {
- return HerokuReleaseCommitKey.String(val)
-}
-
-// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming
-// to the "heroku.release.creation_timestamp" semantic conventions. It
-// represents the time and date the release was created
-func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
- return HerokuReleaseCreationTimestampKey.String(val)
-}
-
-// A host is defined as a computing instance. For example, physical servers,
-// virtual machines, switches, or disk arrays.
-const (
- // HostArchKey is the attribute Key conforming to the "host.arch" semantic
- // conventions. It represents the CPU architecture the host system is
- // running on.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- HostArchKey = attribute.Key("host.arch")
-
- // HostCPUCacheL2SizeKey is the attribute Key conforming to the
- // "host.cpu.cache.l2.size" semantic conventions. It represents the amount
- // of level 2 memory cache available to the processor (in Bytes).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12288000
- HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
-
- // HostCPUFamilyKey is the attribute Key conforming to the
- // "host.cpu.family" semantic conventions. It represents the family or
- // generation of the CPU.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', 'PA-RISC 1.1e'
- HostCPUFamilyKey = attribute.Key("host.cpu.family")
-
- // HostCPUModelIDKey is the attribute Key conforming to the
- // "host.cpu.model.id" semantic conventions. It represents the model
- // identifier. It provides more granular information about the CPU,
- // distinguishing it from other CPUs within the same family.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '6', '9000/778/B180L'
- HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
-
- // HostCPUModelNameKey is the attribute Key conforming to the
- // "host.cpu.model.name" semantic conventions. It represents the model
- // designation of the processor.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz'
- HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
-
- // HostCPUSteppingKey is the attribute Key conforming to the
- // "host.cpu.stepping" semantic conventions. It represents the stepping or
- // core revisions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1', 'r1p1'
- HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
-
- // HostCPUVendorIDKey is the attribute Key conforming to the
- // "host.cpu.vendor.id" semantic conventions. It represents the processor
- // manufacturer identifier. A maximum 12-character string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'GenuineIntel'
- // Note: [CPUID](https://wiki.osdev.org/CPUID) command returns the vendor
- // ID string in EBX, EDX and ECX registers. Writing these to memory in this
- // order results in a 12-character string.
- HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
-
- // HostIDKey is the attribute Key conforming to the "host.id" semantic
- // conventions. It represents the unique host ID. For Cloud, this must be
- // the instance_id assigned by the cloud provider. For non-containerized
- // systems, this should be the `machine-id`. See the table below for the
- // sources to use to determine the `machine-id` based on operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'fdbf79e8af94cb7f9e8df36789187052'
- HostIDKey = attribute.Key("host.id")
-
- // HostImageIDKey is the attribute Key conforming to the "host.image.id"
-	// semantic conventions. It represents the VM image ID or host OS image ID.
- // For Cloud, this value is from the provider.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ami-07b06b442921831e5'
- HostImageIDKey = attribute.Key("host.image.id")
-
- // HostImageNameKey is the attribute Key conforming to the
- // "host.image.name" semantic conventions. It represents the name of the VM
- // image or OS install the host was instantiated from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905'
- HostImageNameKey = attribute.Key("host.image.name")
-
- // HostImageVersionKey is the attribute Key conforming to the
- // "host.image.version" semantic conventions. It represents the version
- // string of the VM image or host OS as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0.1'
- HostImageVersionKey = attribute.Key("host.image.version")
-
- // HostIPKey is the attribute Key conforming to the "host.ip" semantic
- // conventions. It represents the available IP addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '192.168.1.140', 'fe80::abc2:4a28:737a:609e'
- // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
- // addresses MUST be specified in the [RFC
- // 5952](https://www.rfc-editor.org/rfc/rfc5952.html) format.
- HostIPKey = attribute.Key("host.ip")
-
- // HostMacKey is the attribute Key conforming to the "host.mac" semantic
- // conventions. It represents the available MAC addresses of the host,
- // excluding loopback interfaces.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AC-DE-48-23-45-67', 'AC-DE-48-23-45-67-01-9F'
- // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal
- // form](https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf):
- // as hyphen-separated octets in uppercase hexadecimal form from most to
- // least significant.
- HostMacKey = attribute.Key("host.mac")
-
- // HostNameKey is the attribute Key conforming to the "host.name" semantic
- // conventions. It represents the name of the host. On Unix systems, it may
- // contain what the hostname command returns, or the fully qualified
- // hostname, or another name specified by the user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-test'
- HostNameKey = attribute.Key("host.name")
-
- // HostTypeKey is the attribute Key conforming to the "host.type" semantic
- // conventions. It represents the type of host. For Cloud, this must be the
- // machine type.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'n1-standard-1'
- HostTypeKey = attribute.Key("host.type")
-)
-
-var (
- // AMD64
- HostArchAMD64 = HostArchKey.String("amd64")
- // ARM32
- HostArchARM32 = HostArchKey.String("arm32")
- // ARM64
- HostArchARM64 = HostArchKey.String("arm64")
- // Itanium
- HostArchIA64 = HostArchKey.String("ia64")
- // 32-bit PowerPC
- HostArchPPC32 = HostArchKey.String("ppc32")
- // 64-bit PowerPC
- HostArchPPC64 = HostArchKey.String("ppc64")
- // IBM z/Architecture
- HostArchS390x = HostArchKey.String("s390x")
- // 32-bit x86
- HostArchX86 = HostArchKey.String("x86")
-)
-
-// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
-// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
-// level 2 memory cache available to the processor (in Bytes).
-func HostCPUCacheL2Size(val int) attribute.KeyValue {
- return HostCPUCacheL2SizeKey.Int(val)
-}
-
-// HostCPUFamily returns an attribute KeyValue conforming to the
-// "host.cpu.family" semantic conventions. It represents the family or
-// generation of the CPU.
-func HostCPUFamily(val string) attribute.KeyValue {
- return HostCPUFamilyKey.String(val)
-}
-
-// HostCPUModelID returns an attribute KeyValue conforming to the
-// "host.cpu.model.id" semantic conventions. It represents the model
-// identifier. It provides more granular information about the CPU,
-// distinguishing it from other CPUs within the same family.
-func HostCPUModelID(val string) attribute.KeyValue {
- return HostCPUModelIDKey.String(val)
-}
-
-// HostCPUModelName returns an attribute KeyValue conforming to the
-// "host.cpu.model.name" semantic conventions. It represents the model
-// designation of the processor.
-func HostCPUModelName(val string) attribute.KeyValue {
- return HostCPUModelNameKey.String(val)
-}
-
-// HostCPUStepping returns an attribute KeyValue conforming to the
-// "host.cpu.stepping" semantic conventions. It represents the stepping or core
-// revisions.
-func HostCPUStepping(val string) attribute.KeyValue {
- return HostCPUSteppingKey.String(val)
-}
-
-// HostCPUVendorID returns an attribute KeyValue conforming to the
-// "host.cpu.vendor.id" semantic conventions. It represents the processor
-// manufacturer identifier. A maximum 12-character string.
-func HostCPUVendorID(val string) attribute.KeyValue {
- return HostCPUVendorIDKey.String(val)
-}
-
-// HostID returns an attribute KeyValue conforming to the "host.id" semantic
-// conventions. It represents the unique host ID. For Cloud, this must be the
-// instance_id assigned by the cloud provider. For non-containerized systems,
-// this should be the `machine-id`. See the table below for the sources to use
-// to determine the `machine-id` based on operating system.
-func HostID(val string) attribute.KeyValue {
- return HostIDKey.String(val)
-}
-
-// HostImageID returns an attribute KeyValue conforming to the
-// "host.image.id" semantic conventions. It represents the vM image ID or host
-// OS image ID. For Cloud, this value is from the provider.
-func HostImageID(val string) attribute.KeyValue {
- return HostImageIDKey.String(val)
-}
-
-// HostImageName returns an attribute KeyValue conforming to the
-// "host.image.name" semantic conventions. It represents the name of the VM
-// image or OS install the host was instantiated from.
-func HostImageName(val string) attribute.KeyValue {
- return HostImageNameKey.String(val)
-}
-
-// HostImageVersion returns an attribute KeyValue conforming to the
-// "host.image.version" semantic conventions. It represents the version string
-// of the VM image or host OS as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func HostImageVersion(val string) attribute.KeyValue {
- return HostImageVersionKey.String(val)
-}
-
-// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
-// conventions. It represents the available IP addresses of the host, excluding
-// loopback interfaces.
-func HostIP(val ...string) attribute.KeyValue {
- return HostIPKey.StringSlice(val)
-}
-
-// HostMac returns an attribute KeyValue conforming to the "host.mac"
-// semantic conventions. It represents the available MAC addresses of the host,
-// excluding loopback interfaces.
-func HostMac(val ...string) attribute.KeyValue {
- return HostMacKey.StringSlice(val)
-}
-
-// HostName returns an attribute KeyValue conforming to the "host.name"
-// semantic conventions. It represents the name of the host. On Unix systems,
-// it may contain what the hostname command returns, or the fully qualified
-// hostname, or another name specified by the user.
-func HostName(val string) attribute.KeyValue {
- return HostNameKey.String(val)
-}
-
-// HostType returns an attribute KeyValue conforming to the "host.type"
-// semantic conventions. It represents the type of host. For Cloud, this must
-// be the machine type.
-func HostType(val string) attribute.KeyValue {
- return HostTypeKey.String(val)
-}
-
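// A sketch describing the local machine as an OpenTelemetry resource with the
// host attributes above. The concrete values are illustrative; it assumes
// "go.opentelemetry.io/otel/sdk/resource" is imported and this package is
// aliased as semconv.
func hostResource() *resource.Resource {
	return resource.NewWithAttributes(semconv.SchemaURL,
		semconv.HostName("opentelemetry-test"),
		semconv.HostID("fdbf79e8af94cb7f9e8df36789187052"),
		semconv.HostArchAMD64,
		semconv.HostIP("192.168.1.140", "fe80::abc2:4a28:737a:609e"),
		semconv.HostImageName("CentOS-8-x86_64-1905"),
	)
}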
-// Semantic convention attributes in the HTTP namespace.
-const (
- // HTTPConnectionStateKey is the attribute Key conforming to the
- // "http.connection.state" semantic conventions. It represents the state of
- // the HTTP connection in the HTTP connection pool.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'active', 'idle'
- HTTPConnectionStateKey = attribute.Key("http.connection.state")
-
- // HTTPRequestBodySizeKey is the attribute Key conforming to the
- // "http.request.body.size" semantic conventions. It represents the size of
- // the request payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
-
- // HTTPRequestMethodKey is the attribute Key conforming to the
-	// "http.request.method" semantic conventions. It represents the HTTP
- // request method.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GET', 'POST', 'HEAD'
- // Note: HTTP request method value SHOULD be "known" to the
- // instrumentation.
- // By default, this convention defines "known" methods as the ones listed
- // in [RFC9110](https://www.rfc-editor.org/rfc/rfc9110.html#name-methods)
- // and the PATCH method defined in
- // [RFC5789](https://www.rfc-editor.org/rfc/rfc5789.html).
- //
- // If the HTTP request method is not known to instrumentation, it MUST set
- // the `http.request.method` attribute to `_OTHER`.
- //
- // If the HTTP instrumentation could end up converting valid HTTP request
- // methods to `_OTHER`, then it MUST provide a way to override
- // the list of known HTTP methods. If this override is done via environment
- // variable, then the environment variable MUST be named
- // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated
- // list of case-sensitive known HTTP methods
- // (this list MUST be a full override of the default known method, it is
- // not a list of known methods in addition to the defaults).
- //
- // HTTP method names are case-sensitive and `http.request.method` attribute
- // value MUST match a known HTTP method name exactly.
- // Instrumentations for specific web frameworks that consider HTTP methods
- // to be case insensitive, SHOULD populate a canonical equivalent.
- // Tracing instrumentations that do so, MUST also set
- // `http.request.method_original` to the original value.
- HTTPRequestMethodKey = attribute.Key("http.request.method")
-
- // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
- // "http.request.method_original" semantic conventions. It represents the
- // original HTTP method sent by the client in the request line.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'GeT', 'ACL', 'foo'
- HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
-
- // HTTPRequestResendCountKey is the attribute Key conforming to the
- // "http.request.resend_count" semantic conventions. It represents the
- // ordinal number of request resending attempt (for any reason, including
- // redirects).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 3
- // Note: The resend count SHOULD be updated each time an HTTP request gets
- // resent by the client, regardless of what was the cause of the resending
- // (e.g. redirection, authorization failure, 503 Server Unavailable,
- // network issues, or any other).
- HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
-
- // HTTPRequestSizeKey is the attribute Key conforming to the
- // "http.request.size" semantic conventions. It represents the total size
- // of the request in bytes. This should be the total number of bytes sent
- // over the wire, including the request line (HTTP/1.1), framing (HTTP/2
- // and HTTP/3), headers, and request body if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPRequestSizeKey = attribute.Key("http.request.size")
-
- // HTTPResponseBodySizeKey is the attribute Key conforming to the
- // "http.response.body.size" semantic conventions. It represents the size
- // of the response payload body in bytes. This is the number of bytes
- // transferred excluding headers and is often, but not always, present as
- // the
- // [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
- // header. For requests using transport encoding, this should be the
- // compressed size.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3495
- HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
-
- // HTTPResponseSizeKey is the attribute Key conforming to the
- // "http.response.size" semantic conventions. It represents the total size
- // of the response in bytes. This should be the total number of bytes sent
- // over the wire, including the status line (HTTP/1.1), framing (HTTP/2 and
- // HTTP/3), headers, and response body and trailers if any.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1437
- HTTPResponseSizeKey = attribute.Key("http.response.size")
-
- // HTTPResponseStatusCodeKey is the attribute Key conforming to the
- // "http.response.status_code" semantic conventions. It represents the
- // [HTTP response status
- // code](https://tools.ietf.org/html/rfc7231#section-6).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 200
- HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
-
- // HTTPRouteKey is the attribute Key conforming to the "http.route"
- // semantic conventions. It represents the matched route, that is, the path
- // template in the format used by the respective server framework.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/users/:userID?', '{controller}/{action}/{id?}'
- // Note: MUST NOT be populated when this is not supported by the HTTP
- // server framework as the route attribute should have low-cardinality and
- // the URI path can NOT substitute it.
- // SHOULD include the [application
- // root](/docs/http/http-spans.md#http-server-definitions) if there is one.
- HTTPRouteKey = attribute.Key("http.route")
-)
-
-var (
- // active state
- HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
- // idle state
- HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
-)
-
-var (
- // CONNECT method
- HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
- // DELETE method
- HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
- // GET method
- HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
- // HEAD method
- HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
- // OPTIONS method
- HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
- // PATCH method
- HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
- // POST method
- HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
- // PUT method
- HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
- // TRACE method
- HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
- // Any HTTP method that the instrumentation has no prior knowledge of
- HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
-)
-
-// HTTPRequestBodySize returns an attribute KeyValue conforming to the
-// "http.request.body.size" semantic conventions. It represents the size of the
-// request payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPRequestBodySize(val int) attribute.KeyValue {
- return HTTPRequestBodySizeKey.Int(val)
-}
-
-// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
-// "http.request.method_original" semantic conventions. It represents the
-// original HTTP method sent by the client in the request line.
-func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
- return HTTPRequestMethodOriginalKey.String(val)
-}
-
-// HTTPRequestResendCount returns an attribute KeyValue conforming to the
-// "http.request.resend_count" semantic conventions. It represents the ordinal
-// number of request resending attempt (for any reason, including redirects).
-func HTTPRequestResendCount(val int) attribute.KeyValue {
- return HTTPRequestResendCountKey.Int(val)
-}
-
-// HTTPRequestSize returns an attribute KeyValue conforming to the
-// "http.request.size" semantic conventions. It represents the total size of
-// the request in bytes. This should be the total number of bytes sent over the
-// wire, including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and request body if any.
-func HTTPRequestSize(val int) attribute.KeyValue {
- return HTTPRequestSizeKey.Int(val)
-}
-
-// HTTPResponseBodySize returns an attribute KeyValue conforming to the
-// "http.response.body.size" semantic conventions. It represents the size of
-// the response payload body in bytes. This is the number of bytes transferred
-// excluding headers and is often, but not always, present as the
-// [Content-Length](https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length)
-// header. For requests using transport encoding, this should be the compressed
-// size.
-func HTTPResponseBodySize(val int) attribute.KeyValue {
- return HTTPResponseBodySizeKey.Int(val)
-}
-
-// HTTPResponseSize returns an attribute KeyValue conforming to the
-// "http.response.size" semantic conventions. It represents the total size of
-// the response in bytes. This should be the total number of bytes sent over
-// the wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
-// headers, and response body and trailers if any.
-func HTTPResponseSize(val int) attribute.KeyValue {
- return HTTPResponseSizeKey.Int(val)
-}
-
-// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
-// "http.response.status_code" semantic conventions. It represents the [HTTP
-// response status code](https://tools.ietf.org/html/rfc7231#section-6).
-func HTTPResponseStatusCode(val int) attribute.KeyValue {
- return HTTPResponseStatusCodeKey.Int(val)
-}
-
-// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
-// semantic conventions. It represents the matched route, that is, the path
-// template in the format used by the respective server framework.
-func HTTPRoute(val string) attribute.KeyValue {
- return HTTPRouteKey.String(val)
-}
-
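// A sketch of annotating an HTTP server span with the attributes above, mixing
// the stable method/route/status attributes with the experimental size ones.
// Route, status code, and sizes are illustrative; it assumes a trace.Span
// obtained elsewhere, "go.opentelemetry.io/otel/trace" imported, and this
// package aliased as semconv.
func annotateServerSpan(span trace.Span) {
	span.SetAttributes(
		semconv.HTTPRequestMethodGet,
		semconv.HTTPRoute("/users/:userID?"),
		semconv.HTTPResponseStatusCode(200),
		semconv.HTTPRequestBodySize(3495),
		semconv.HTTPResponseBodySize(1437),
	)
}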
-// Java Virtual machine related attributes.
-const (
- // JvmBufferPoolNameKey is the attribute Key conforming to the
- // "jvm.buffer.pool.name" semantic conventions. It represents the name of
- // the buffer pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mapped', 'direct'
- // Note: Pool names are generally obtained via
- // [BufferPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/BufferPoolMXBean.html#getName()).
- JvmBufferPoolNameKey = attribute.Key("jvm.buffer.pool.name")
-
- // JvmGcActionKey is the attribute Key conforming to the "jvm.gc.action"
- // semantic conventions. It represents the name of the garbage collector
- // action.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'end of minor GC', 'end of major GC'
- // Note: Garbage collector action is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcAction()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcAction()).
- JvmGcActionKey = attribute.Key("jvm.gc.action")
-
- // JvmGcNameKey is the attribute Key conforming to the "jvm.gc.name"
- // semantic conventions. It represents the name of the garbage collector.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Young Generation', 'G1 Old Generation'
- // Note: Garbage collector name is generally obtained via
- // [GarbageCollectionNotificationInfo#getGcName()](https://docs.oracle.com/en/java/javase/11/docs/api/jdk.management/com/sun/management/GarbageCollectionNotificationInfo.html#getGcName()).
- JvmGcNameKey = attribute.Key("jvm.gc.name")
-
- // JvmMemoryPoolNameKey is the attribute Key conforming to the
- // "jvm.memory.pool.name" semantic conventions. It represents the name of
- // the memory pool.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'G1 Old Gen', 'G1 Eden space', 'G1 Survivor Space'
- // Note: Pool names are generally obtained via
- // [MemoryPoolMXBean#getName()](https://docs.oracle.com/en/java/javase/11/docs/api/java.management/java/lang/management/MemoryPoolMXBean.html#getName()).
- JvmMemoryPoolNameKey = attribute.Key("jvm.memory.pool.name")
-
- // JvmMemoryTypeKey is the attribute Key conforming to the
- // "jvm.memory.type" semantic conventions. It represents the type of
- // memory.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'heap', 'non_heap'
- JvmMemoryTypeKey = attribute.Key("jvm.memory.type")
-
- // JvmThreadDaemonKey is the attribute Key conforming to the
-	// "jvm.thread.daemon" semantic conventions. It represents whether the
-	// thread is a daemon or not.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: stable
- JvmThreadDaemonKey = attribute.Key("jvm.thread.daemon")
-
- // JvmThreadStateKey is the attribute Key conforming to the
- // "jvm.thread.state" semantic conventions. It represents the state of the
- // thread.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'runnable', 'blocked'
- JvmThreadStateKey = attribute.Key("jvm.thread.state")
-)
-
-var (
- // Heap memory
- JvmMemoryTypeHeap = JvmMemoryTypeKey.String("heap")
- // Non-heap memory
- JvmMemoryTypeNonHeap = JvmMemoryTypeKey.String("non_heap")
-)
-
-var (
- // A thread that has not yet started is in this state
- JvmThreadStateNew = JvmThreadStateKey.String("new")
- // A thread executing in the Java virtual machine is in this state
- JvmThreadStateRunnable = JvmThreadStateKey.String("runnable")
- // A thread that is blocked waiting for a monitor lock is in this state
- JvmThreadStateBlocked = JvmThreadStateKey.String("blocked")
- // A thread that is waiting indefinitely for another thread to perform a particular action is in this state
- JvmThreadStateWaiting = JvmThreadStateKey.String("waiting")
- // A thread that is waiting for another thread to perform an action for up to a specified waiting time is in this state
- JvmThreadStateTimedWaiting = JvmThreadStateKey.String("timed_waiting")
- // A thread that has exited is in this state
- JvmThreadStateTerminated = JvmThreadStateKey.String("terminated")
-)
-
-// JvmBufferPoolName returns an attribute KeyValue conforming to the
-// "jvm.buffer.pool.name" semantic conventions. It represents the name of the
-// buffer pool.
-func JvmBufferPoolName(val string) attribute.KeyValue {
- return JvmBufferPoolNameKey.String(val)
-}
-
-// JvmGcAction returns an attribute KeyValue conforming to the
-// "jvm.gc.action" semantic conventions. It represents the name of the garbage
-// collector action.
-func JvmGcAction(val string) attribute.KeyValue {
- return JvmGcActionKey.String(val)
-}
-
-// JvmGcName returns an attribute KeyValue conforming to the "jvm.gc.name"
-// semantic conventions. It represents the name of the garbage collector.
-func JvmGcName(val string) attribute.KeyValue {
- return JvmGcNameKey.String(val)
-}
-
-// JvmMemoryPoolName returns an attribute KeyValue conforming to the
-// "jvm.memory.pool.name" semantic conventions. It represents the name of the
-// memory pool.
-func JvmMemoryPoolName(val string) attribute.KeyValue {
- return JvmMemoryPoolNameKey.String(val)
-}
-
-// JvmThreadDaemon returns an attribute KeyValue conforming to the
-// "jvm.thread.daemon" semantic conventions. It represents the whether the
-// thread is daemon or not.
-func JvmThreadDaemon(val bool) attribute.KeyValue {
- return JvmThreadDaemonKey.Bool(val)
-}
-
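// A sketch showing the JVM attributes above used as metric attributes, which
// is where they most commonly appear. The meter name, instrument name, and
// recorded value are illustrative; it assumes imports of "context",
// "go.opentelemetry.io/otel", and "go.opentelemetry.io/otel/metric", with this
// package aliased as semconv.
func recordHeapUsage(ctx context.Context) error {
	memUsed, err := otel.Meter("jvm-example").Int64UpDownCounter("jvm.memory.used")
	if err != nil {
		return err
	}
	memUsed.Add(ctx, 4_000_000, metric.WithAttributes(
		semconv.JvmMemoryTypeHeap,
		semconv.JvmMemoryPoolName("G1 Eden space"),
	))
	return nil
}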
-// Kubernetes resource attributes.
-const (
- // K8SClusterNameKey is the attribute Key conforming to the
- // "k8s.cluster.name" semantic conventions. It represents the name of the
- // cluster.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-cluster'
- K8SClusterNameKey = attribute.Key("k8s.cluster.name")
-
- // K8SClusterUIDKey is the attribute Key conforming to the
- // "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for
- // the cluster, set to the UID of the `kube-system` namespace.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '218fc5a9-a5f1-4b54-aa05-46717d0ab26d'
- // Note: K8S doesn't have support for obtaining a cluster ID. If this is
- // ever
- // added, we will recommend collecting the `k8s.cluster.uid` through the
- // official APIs. In the meantime, we are able to use the `uid` of the
- // `kube-system` namespace as a proxy for cluster ID. Read on for the
- // rationale.
- //
- // Every object created in a K8S cluster is assigned a distinct UID. The
- // `kube-system` namespace is used by Kubernetes itself and will exist
- // for the lifetime of the cluster. Using the `uid` of the `kube-system`
- // namespace is a reasonable proxy for the K8S ClusterID as it will only
- // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
- // UUIDs as standardized by
- // [ISO/IEC 9834-8 and ITU-T
- // X.667](https://www.itu.int/ITU-T/studygroups/com17/oid.html).
- // Which states:
- //
- // > If generated according to one of the mechanisms defined in Rec.
- // ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
- // different from all other UUIDs generated before 3603 A.D., or is
- // extremely likely to be different (depending on the mechanism chosen).
- //
- // Therefore, UIDs between clusters should be extremely unlikely to
- // conflict.
- K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
-
- // K8SContainerNameKey is the attribute Key conforming to the
- // "k8s.container.name" semantic conventions. It represents the name of the
- // Container from the Pod specification; it must be unique within a Pod. The
- // container runtime usually uses a different, globally unique name
- // (`container.name`).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'redis'
- K8SContainerNameKey = attribute.Key("k8s.container.name")
-
- // K8SContainerRestartCountKey is the attribute Key conforming to the
- // "k8s.container.restart_count" semantic conventions. It represents the
- // number of times the container was restarted. This attribute can be used
- // to identify a particular container (running or stopped) within a
- // container spec.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
-
- // K8SContainerStatusLastTerminatedReasonKey is the attribute Key
- // conforming to the "k8s.container.status.last_terminated_reason" semantic
- // conventions. It represents the last terminated reason of the Container.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Evicted', 'Error'
- K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
-
- // K8SCronJobNameKey is the attribute Key conforming to the
- // "k8s.cronjob.name" semantic conventions. It represents the name of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
-
- // K8SCronJobUIDKey is the attribute Key conforming to the
- // "k8s.cronjob.uid" semantic conventions. It represents the UID of the
- // CronJob.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
-
- // K8SDaemonSetNameKey is the attribute Key conforming to the
- // "k8s.daemonset.name" semantic conventions. It represents the name of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
-
- // K8SDaemonSetUIDKey is the attribute Key conforming to the
- // "k8s.daemonset.uid" semantic conventions. It represents the UID of the
- // DaemonSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
-
- // K8SDeploymentNameKey is the attribute Key conforming to the
- // "k8s.deployment.name" semantic conventions. It represents the name of
- // the Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
-
- // K8SDeploymentUIDKey is the attribute Key conforming to the
- // "k8s.deployment.uid" semantic conventions. It represents the UID of the
- // Deployment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
-
- // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name"
- // semantic conventions. It represents the name of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SJobNameKey = attribute.Key("k8s.job.name")
-
- // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid"
- // semantic conventions. It represents the UID of the Job.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SJobUIDKey = attribute.Key("k8s.job.uid")
-
- // K8SNamespaceNameKey is the attribute Key conforming to the
- // "k8s.namespace.name" semantic conventions. It represents the name of the
- // namespace that the pod is running in.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'default'
- K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
-
- // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
- // semantic conventions. It represents the name of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'node-1'
- K8SNodeNameKey = attribute.Key("k8s.node.name")
-
- // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid"
- // semantic conventions. It represents the UID of the Node.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2'
- K8SNodeUIDKey = attribute.Key("k8s.node.uid")
-
- // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name"
- // semantic conventions. It represents the name of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry-pod-autoconf'
- K8SPodNameKey = attribute.Key("k8s.pod.name")
-
- // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid"
- // semantic conventions. It represents the UID of the Pod.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SPodUIDKey = attribute.Key("k8s.pod.uid")
-
- // K8SReplicaSetNameKey is the attribute Key conforming to the
- // "k8s.replicaset.name" semantic conventions. It represents the name of
- // the ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
-
- // K8SReplicaSetUIDKey is the attribute Key conforming to the
- // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
- // ReplicaSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
-
- // K8SStatefulSetNameKey is the attribute Key conforming to the
- // "k8s.statefulset.name" semantic conventions. It represents the name of
- // the StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry'
- K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
-
- // K8SStatefulSetUIDKey is the attribute Key conforming to the
- // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
- // StatefulSet.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff'
- K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
-)
-
-// K8SClusterName returns an attribute KeyValue conforming to the
-// "k8s.cluster.name" semantic conventions. It represents the name of the
-// cluster.
-func K8SClusterName(val string) attribute.KeyValue {
- return K8SClusterNameKey.String(val)
-}
-
-// K8SClusterUID returns an attribute KeyValue conforming to the
-// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
-// cluster, set to the UID of the `kube-system` namespace.
-func K8SClusterUID(val string) attribute.KeyValue {
- return K8SClusterUIDKey.String(val)
-}
-
-// K8SContainerName returns an attribute KeyValue conforming to the
-// "k8s.container.name" semantic conventions. It represents the name of the
-// Container from the Pod specification; it must be unique within a Pod. The
-// container runtime usually uses a different, globally unique name (`container.name`).
-func K8SContainerName(val string) attribute.KeyValue {
- return K8SContainerNameKey.String(val)
-}
-
-// K8SContainerRestartCount returns an attribute KeyValue conforming to the
-// "k8s.container.restart_count" semantic conventions. It represents the number
-// of times the container was restarted. This attribute can be used to identify
-// a particular container (running or stopped) within a container spec.
-func K8SContainerRestartCount(val int) attribute.KeyValue {
- return K8SContainerRestartCountKey.Int(val)
-}
-
-// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
-// conforming to the "k8s.container.status.last_terminated_reason" semantic
-// conventions. It represents the last terminated reason of the Container.
-func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
- return K8SContainerStatusLastTerminatedReasonKey.String(val)
-}
-
-// K8SCronJobName returns an attribute KeyValue conforming to the
-// "k8s.cronjob.name" semantic conventions. It represents the name of the
-// CronJob.
-func K8SCronJobName(val string) attribute.KeyValue {
- return K8SCronJobNameKey.String(val)
-}
-
-// K8SCronJobUID returns an attribute KeyValue conforming to the
-// "k8s.cronjob.uid" semantic conventions. It represents the UID of the
-// CronJob.
-func K8SCronJobUID(val string) attribute.KeyValue {
- return K8SCronJobUIDKey.String(val)
-}
-
-// K8SDaemonSetName returns an attribute KeyValue conforming to the
-// "k8s.daemonset.name" semantic conventions. It represents the name of the
-// DaemonSet.
-func K8SDaemonSetName(val string) attribute.KeyValue {
- return K8SDaemonSetNameKey.String(val)
-}
-
-// K8SDaemonSetUID returns an attribute KeyValue conforming to the
-// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
-// DaemonSet.
-func K8SDaemonSetUID(val string) attribute.KeyValue {
- return K8SDaemonSetUIDKey.String(val)
-}
-
-// K8SDeploymentName returns an attribute KeyValue conforming to the
-// "k8s.deployment.name" semantic conventions. It represents the name of the
-// Deployment.
-func K8SDeploymentName(val string) attribute.KeyValue {
- return K8SDeploymentNameKey.String(val)
-}
-
-// K8SDeploymentUID returns an attribute KeyValue conforming to the
-// "k8s.deployment.uid" semantic conventions. It represents the UID of the
-// Deployment.
-func K8SDeploymentUID(val string) attribute.KeyValue {
- return K8SDeploymentUIDKey.String(val)
-}
-
-// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
-// semantic conventions. It represents the name of the Job.
-func K8SJobName(val string) attribute.KeyValue {
- return K8SJobNameKey.String(val)
-}
-
-// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
-// semantic conventions. It represents the UID of the Job.
-func K8SJobUID(val string) attribute.KeyValue {
- return K8SJobUIDKey.String(val)
-}
-
-// K8SNamespaceName returns an attribute KeyValue conforming to the
-// "k8s.namespace.name" semantic conventions. It represents the name of the
-// namespace that the pod is running in.
-func K8SNamespaceName(val string) attribute.KeyValue {
- return K8SNamespaceNameKey.String(val)
-}
-
-// K8SNodeName returns an attribute KeyValue conforming to the
-// "k8s.node.name" semantic conventions. It represents the name of the Node.
-func K8SNodeName(val string) attribute.KeyValue {
- return K8SNodeNameKey.String(val)
-}
-
-// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
-// semantic conventions. It represents the UID of the Node.
-func K8SNodeUID(val string) attribute.KeyValue {
- return K8SNodeUIDKey.String(val)
-}
-
-// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
-// semantic conventions. It represents the name of the Pod.
-func K8SPodName(val string) attribute.KeyValue {
- return K8SPodNameKey.String(val)
-}
-
-// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
-// semantic conventions. It represents the UID of the Pod.
-func K8SPodUID(val string) attribute.KeyValue {
- return K8SPodUIDKey.String(val)
-}
-
-// K8SReplicaSetName returns an attribute KeyValue conforming to the
-// "k8s.replicaset.name" semantic conventions. It represents the name of the
-// ReplicaSet.
-func K8SReplicaSetName(val string) attribute.KeyValue {
- return K8SReplicaSetNameKey.String(val)
-}
-
-// K8SReplicaSetUID returns an attribute KeyValue conforming to the
-// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
-// ReplicaSet.
-func K8SReplicaSetUID(val string) attribute.KeyValue {
- return K8SReplicaSetUIDKey.String(val)
-}
-
-// K8SStatefulSetName returns an attribute KeyValue conforming to the
-// "k8s.statefulset.name" semantic conventions. It represents the name of the
-// StatefulSet.
-func K8SStatefulSetName(val string) attribute.KeyValue {
- return K8SStatefulSetNameKey.String(val)
-}
-
-// K8SStatefulSetUID returns an attribute KeyValue conforming to the
-// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
-// StatefulSet.
-func K8SStatefulSetUID(val string) attribute.KeyValue {
- return K8SStatefulSetUIDKey.String(val)
-}
-
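(Editorial aside, not part of this change: the k8s.* helpers above are plain attribute.KeyValue constructors. Below is a minimal, hedged sketch of how such helpers are typically consumed when building an OpenTelemetry resource; the semconv import path/version and all literal values are assumptions, not something this diff introduces.)

package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version of the vendored package
)

// newK8sResource builds a resource tagged with a few k8s.* attributes;
// the cluster, namespace, pod, and container names are illustrative only.
func newK8sResource() *resource.Resource {
	return resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.K8SClusterName("opentelemetry-cluster"),
		semconv.K8SNamespaceName("default"),
		semconv.K8SPodName("opentelemetry-pod-autoconf"),
		semconv.K8SContainerName("redis"),
	)
}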
-// Log attributes
-const (
- // LogIostreamKey is the attribute Key conforming to the "log.iostream"
- // semantic conventions. It represents the stream associated with the log.
- // See below for a list of well-known values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- LogIostreamKey = attribute.Key("log.iostream")
-)
-
-var (
- // Logs from stdout stream
- LogIostreamStdout = LogIostreamKey.String("stdout")
- // Events from stderr stream
- LogIostreamStderr = LogIostreamKey.String("stderr")
-)
-
-// Attributes for a file to which log was emitted.
-const (
- // LogFileNameKey is the attribute Key conforming to the "log.file.name"
- // semantic conventions. It represents the basename of the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'audit.log'
- LogFileNameKey = attribute.Key("log.file.name")
-
- // LogFileNameResolvedKey is the attribute Key conforming to the
- // "log.file.name_resolved" semantic conventions. It represents the
- // basename of the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'uuid.log'
- LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
-
- // LogFilePathKey is the attribute Key conforming to the "log.file.path"
- // semantic conventions. It represents the full path to the file.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/log/mysql/audit.log'
- LogFilePathKey = attribute.Key("log.file.path")
-
- // LogFilePathResolvedKey is the attribute Key conforming to the
- // "log.file.path_resolved" semantic conventions. It represents the full
- // path to the file, with symlinks resolved.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/var/lib/docker/uuid.log'
- LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
-)
-
-// LogFileName returns an attribute KeyValue conforming to the
-// "log.file.name" semantic conventions. It represents the basename of the
-// file.
-func LogFileName(val string) attribute.KeyValue {
- return LogFileNameKey.String(val)
-}
-
-// LogFileNameResolved returns an attribute KeyValue conforming to the
-// "log.file.name_resolved" semantic conventions. It represents the basename of
-// the file, with symlinks resolved.
-func LogFileNameResolved(val string) attribute.KeyValue {
- return LogFileNameResolvedKey.String(val)
-}
-
-// LogFilePath returns an attribute KeyValue conforming to the
-// "log.file.path" semantic conventions. It represents the full path to the
-// file.
-func LogFilePath(val string) attribute.KeyValue {
- return LogFilePathKey.String(val)
-}
-
-// LogFilePathResolved returns an attribute KeyValue conforming to the
-// "log.file.path_resolved" semantic conventions. It represents the full path
-// to the file, with symlinks resolved.
-func LogFilePathResolved(val string) attribute.KeyValue {
- return LogFilePathResolvedKey.String(val)
-}
-
-// The generic attributes that may be used in any Log Record.
-const (
- // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
- // semantic conventions. It represents a unique identifier for the Log
- // Record.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '01ARZ3NDEKTSV4RRFFQ69G5FAV'
- // Note: If an id is provided, other log records with the same id will be
- // considered duplicates and can be removed safely. This means that two
- // distinguishable log records MUST have different values.
- // The id MAY be a [Universally Unique Lexicographically Sortable
- // Identifier (ULID)](https://github.com/ulid/spec), but other identifiers
- // (e.g. UUID) may be used as needed.
- LogRecordUIDKey = attribute.Key("log.record.uid")
-)
-
-// LogRecordUID returns an attribute KeyValue conforming to the
-// "log.record.uid" semantic conventions. It represents a unique identifier for
-// the Log Record.
-func LogRecordUID(val string) attribute.KeyValue {
- return LogRecordUIDKey.String(val)
-}
-
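(Editorial aside, not part of this change: the log.* helpers above all yield ordinary attribute.KeyValue values, so they can be grouped into a slice the way an exporter or processor might. A hedged sketch follows; the import path/version, file names, and ULID are assumptions.)

package example

import (
	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version of the vendored package
)

// logFileAttributes collects a few log.* attributes for a hypothetical record;
// every literal value below is illustrative only.
func logFileAttributes() []attribute.KeyValue {
	return []attribute.KeyValue{
		semconv.LogIostreamStdout,
		semconv.LogFileName("audit.log"),
		semconv.LogFilePath("/var/log/mysql/audit.log"),
		semconv.LogRecordUID("01ARZ3NDEKTSV4RRFFQ69G5FAV"),
	}
}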
-// Attributes describing telemetry around messaging systems and messaging
-// activities.
-const (
- // MessagingBatchMessageCountKey is the attribute Key conforming to the
- // "messaging.batch.message_count" semantic conventions. It represents the
- // number of messages sent, received, or processed in the scope of the
- // batching operation.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 0, 1, 2
- // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
- // spans that operate with a single message. When a messaging client
- // library supports both batch and single-message API for the same
- // operation, instrumentations SHOULD use `messaging.batch.message_count`
- // for batching APIs and SHOULD NOT use it for single-message APIs.
- MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
-
- // MessagingClientIDKey is the attribute Key conforming to the
- // "messaging.client.id" semantic conventions. It represents a unique
- // identifier for the client that consumes or produces a message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'client-5', 'myhost@8742@s8083jm'
- MessagingClientIDKey = attribute.Key("messaging.client.id")
-
- // MessagingDestinationAnonymousKey is the attribute Key conforming to the
- // "messaging.destination.anonymous" semantic conventions. It represents a
- // boolean that is true if the message destination is anonymous (could be
- // unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
-
- // MessagingDestinationNameKey is the attribute Key conforming to the
- // "messaging.destination.name" semantic conventions. It represents the
- // message destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: Destination name SHOULD uniquely identify a specific queue, topic
- // or other entity within the broker. If
- // the broker doesn't have such notion, the destination name SHOULD
- // uniquely identify the broker.
- MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
-
- // MessagingDestinationPartitionIDKey is the attribute Key conforming to
- // the "messaging.destination.partition.id" semantic conventions. It
- // represents the identifier of the partition messages are sent to or
- // received from, unique within the `messaging.destination.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1'
- MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
-
- // MessagingDestinationTemplateKey is the attribute Key conforming to the
- // "messaging.destination.template" semantic conventions. It represents the
- // low cardinality representation of the messaging destination name
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/customers/{customerID}'
- // Note: Destination names could be constructed from templates. An example
- // would be a destination name involving a user name or product id.
- // Although the destination name in this case is of high cardinality, the
- // underlying template is of low cardinality and can be effectively used
- // for grouping and aggregation.
- MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
-
- // MessagingDestinationTemporaryKey is the attribute Key conforming to the
- // "messaging.destination.temporary" semantic conventions. It represents a
- // boolean that is true if the message destination is temporary and might
- // not exist anymore after messages are processed.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
-
- // MessagingDestinationPublishAnonymousKey is the attribute Key conforming
- // to the "messaging.destination_publish.anonymous" semantic conventions.
- // It represents a boolean that is true if the publish message destination
- // is anonymous (could be unnamed or have auto-generated name).
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingDestinationPublishAnonymousKey = attribute.Key("messaging.destination_publish.anonymous")
-
- // MessagingDestinationPublishNameKey is the attribute Key conforming to
- // the "messaging.destination_publish.name" semantic conventions. It
- // represents the name of the original destination the message was
- // published to
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyQueue', 'MyTopic'
- // Note: The name SHOULD uniquely identify a specific queue, topic, or
- // other entity within the broker. If
- // the broker doesn't have such notion, the original destination name
- // SHOULD uniquely identify the broker.
- MessagingDestinationPublishNameKey = attribute.Key("messaging.destination_publish.name")
-
- // MessagingMessageBodySizeKey is the attribute Key conforming to the
- // "messaging.message.body.size" semantic conventions. It represents the
- // size of the message body in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1439
- // Note: This can refer to either the compressed or uncompressed body size.
- // If both sizes are known, the uncompressed
- // body size should be used.
- MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
-
- // MessagingMessageConversationIDKey is the attribute Key conforming to the
- // "messaging.message.conversation_id" semantic conventions. It represents
- // the conversation ID identifying the conversation to which the message
- // belongs, represented as a string. Sometimes called "Correlation ID".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MyConversationID'
- MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
-
- // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
- // "messaging.message.envelope.size" semantic conventions. It represents
- // the size of the message body and metadata in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2738
- // Note: This can refer to either the compressed or uncompressed size. If
- // both sizes are known, the uncompressed
- // size should be used.
- MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
-
- // MessagingMessageIDKey is the attribute Key conforming to the
- // "messaging.message.id" semantic conventions. It represents a value used
- // by the messaging system as an identifier for the message, represented as
- // a string.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '452a7c7c7c7048c2f887f61572b18fc2'
- MessagingMessageIDKey = attribute.Key("messaging.message.id")
-
- // MessagingOperationNameKey is the attribute Key conforming to the
- // "messaging.operation.name" semantic conventions. It represents the
- // system-specific name of the messaging operation.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack', 'nack', 'send'
- MessagingOperationNameKey = attribute.Key("messaging.operation.name")
-
- // MessagingOperationTypeKey is the attribute Key conforming to the
- // "messaging.operation.type" semantic conventions. It represents a string
- // identifying the type of the messaging operation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: If a custom value is used, it MUST be of low cardinality.
- MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
-
- // MessagingSystemKey is the attribute Key conforming to the
- // "messaging.system" semantic conventions. It represents the messaging
- // system as identified by the client instrumentation.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The actual messaging system may differ from the one known by the
- // client. For example, when using Kafka client libraries to communicate
- // with Azure Event Hubs, the `messaging.system` is set to `kafka` based on
- // the instrumentation's best knowledge.
- MessagingSystemKey = attribute.Key("messaging.system")
-)
-
-var (
- // One or more messages are provided for publishing to an intermediary. If a single message is published, the context of the "Publish" span can be used as the creation context and no "Create" span needs to be created
- MessagingOperationTypePublish = MessagingOperationTypeKey.String("publish")
- // A message is created. "Create" spans always refer to a single message and are used to provide a unique creation context for messages in batch publishing scenarios
- MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
- // One or more messages are requested by a consumer. This operation refers to pull-based scenarios, where consumers explicitly call methods of messaging SDKs to receive messages
- MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
- // One or more messages are delivered to or processed by a consumer
- MessagingOperationTypeDeliver = MessagingOperationTypeKey.String("process")
- // One or more messages are settled
- MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
-)
-
-var (
- // Apache ActiveMQ
- MessagingSystemActivemq = MessagingSystemKey.String("activemq")
- // Amazon Simple Queue Service (SQS)
- MessagingSystemAWSSqs = MessagingSystemKey.String("aws_sqs")
- // Azure Event Grid
- MessagingSystemEventgrid = MessagingSystemKey.String("eventgrid")
- // Azure Event Hubs
- MessagingSystemEventhubs = MessagingSystemKey.String("eventhubs")
- // Azure Service Bus
- MessagingSystemServicebus = MessagingSystemKey.String("servicebus")
- // Google Cloud Pub/Sub
- MessagingSystemGCPPubsub = MessagingSystemKey.String("gcp_pubsub")
- // Java Message Service
- MessagingSystemJms = MessagingSystemKey.String("jms")
- // Apache Kafka
- MessagingSystemKafka = MessagingSystemKey.String("kafka")
- // RabbitMQ
- MessagingSystemRabbitmq = MessagingSystemKey.String("rabbitmq")
- // Apache RocketMQ
- MessagingSystemRocketmq = MessagingSystemKey.String("rocketmq")
-)
-
-// MessagingBatchMessageCount returns an attribute KeyValue conforming to
-// the "messaging.batch.message_count" semantic conventions. It represents the
-// number of messages sent, received, or processed in the scope of the batching
-// operation.
-func MessagingBatchMessageCount(val int) attribute.KeyValue {
- return MessagingBatchMessageCountKey.Int(val)
-}
-
-// MessagingClientID returns an attribute KeyValue conforming to the
-// "messaging.client.id" semantic conventions. It represents a unique
-// identifier for the client that consumes or produces a message.
-func MessagingClientID(val string) attribute.KeyValue {
- return MessagingClientIDKey.String(val)
-}
-
-// MessagingDestinationAnonymous returns an attribute KeyValue conforming to
-// the "messaging.destination.anonymous" semantic conventions. It represents a
-// boolean that is true if the message destination is anonymous (could be
-// unnamed or have auto-generated name).
-func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationName returns an attribute KeyValue conforming to the
-// "messaging.destination.name" semantic conventions. It represents the message
-// destination name
-func MessagingDestinationName(val string) attribute.KeyValue {
- return MessagingDestinationNameKey.String(val)
-}
-
-// MessagingDestinationPartitionID returns an attribute KeyValue conforming
-// to the "messaging.destination.partition.id" semantic conventions. It
-// represents the identifier of the partition messages are sent to or received
-// from, unique within the `messaging.destination.name`.
-func MessagingDestinationPartitionID(val string) attribute.KeyValue {
- return MessagingDestinationPartitionIDKey.String(val)
-}
-
-// MessagingDestinationTemplate returns an attribute KeyValue conforming to
-// the "messaging.destination.template" semantic conventions. It represents the
-// low cardinality representation of the messaging destination name
-func MessagingDestinationTemplate(val string) attribute.KeyValue {
- return MessagingDestinationTemplateKey.String(val)
-}
-
-// MessagingDestinationTemporary returns an attribute KeyValue conforming to
-// the "messaging.destination.temporary" semantic conventions. It represents a
-// boolean that is true if the message destination is temporary and might not
-// exist anymore after messages are processed.
-func MessagingDestinationTemporary(val bool) attribute.KeyValue {
- return MessagingDestinationTemporaryKey.Bool(val)
-}
-
-// MessagingDestinationPublishAnonymous returns an attribute KeyValue
-// conforming to the "messaging.destination_publish.anonymous" semantic
-// conventions. It represents a boolean that is true if the publish message
-// destination is anonymous (could be unnamed or have auto-generated name).
-func MessagingDestinationPublishAnonymous(val bool) attribute.KeyValue {
- return MessagingDestinationPublishAnonymousKey.Bool(val)
-}
-
-// MessagingDestinationPublishName returns an attribute KeyValue conforming
-// to the "messaging.destination_publish.name" semantic conventions. It
-// represents the name of the original destination the message was published to
-func MessagingDestinationPublishName(val string) attribute.KeyValue {
- return MessagingDestinationPublishNameKey.String(val)
-}
-
-// MessagingMessageBodySize returns an attribute KeyValue conforming to the
-// "messaging.message.body.size" semantic conventions. It represents the size
-// of the message body in bytes.
-func MessagingMessageBodySize(val int) attribute.KeyValue {
- return MessagingMessageBodySizeKey.Int(val)
-}
-
-// MessagingMessageConversationID returns an attribute KeyValue conforming
-// to the "messaging.message.conversation_id" semantic conventions. It
-// represents the conversation ID identifying the conversation to which the
-// message belongs, represented as a string. Sometimes called "Correlation ID".
-func MessagingMessageConversationID(val string) attribute.KeyValue {
- return MessagingMessageConversationIDKey.String(val)
-}
-
-// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to
-// the "messaging.message.envelope.size" semantic conventions. It represents
-// the size of the message body and metadata in bytes.
-func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
- return MessagingMessageEnvelopeSizeKey.Int(val)
-}
-
-// MessagingMessageID returns an attribute KeyValue conforming to the
-// "messaging.message.id" semantic conventions. It represents a value used by
-// the messaging system as an identifier for the message, represented as a
-// string.
-func MessagingMessageID(val string) attribute.KeyValue {
- return MessagingMessageIDKey.String(val)
-}
-
-// MessagingOperationName returns an attribute KeyValue conforming to the
-// "messaging.operation.name" semantic conventions. It represents the
-// system-specific name of the messaging operation.
-func MessagingOperationName(val string) attribute.KeyValue {
- return MessagingOperationNameKey.String(val)
-}
-
-// This group describes attributes specific to Apache Kafka.
-const (
- // MessagingKafkaConsumerGroupKey is the attribute Key conforming to the
- // "messaging.kafka.consumer.group" semantic conventions. It represents the
- // name of the Kafka Consumer Group that is handling the message. Only
- // applies to consumers, not producers.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'my-group'
- MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer.group")
-
- // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
- // "messaging.kafka.message.key" semantic conventions. It represents the
- // message keys in Kafka, which are used for grouping alike messages to
- // ensure they're processed on the same partition. They differ from
- // `messaging.message.id` in that they're not unique. If the key is `null`,
- // the attribute MUST NOT be set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- // Note: If the key type is not string, its string representation has to
- // be supplied for the attribute. If the key has no unambiguous, canonical
- // string form, don't include its value.
- MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
-
- // MessagingKafkaMessageOffsetKey is the attribute Key conforming to the
- // "messaging.kafka.message.offset" semantic conventions. It represents the
- // offset of a record in the corresponding Kafka partition.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- MessagingKafkaMessageOffsetKey = attribute.Key("messaging.kafka.message.offset")
-
- // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
- // "messaging.kafka.message.tombstone" semantic conventions. It represents
- // a boolean that is true if the message is a tombstone.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
-)
-
-// MessagingKafkaConsumerGroup returns an attribute KeyValue conforming to
-// the "messaging.kafka.consumer.group" semantic conventions. It represents the
-// name of the Kafka Consumer Group that is handling the message. Only applies
-// to consumers, not producers.
-func MessagingKafkaConsumerGroup(val string) attribute.KeyValue {
- return MessagingKafkaConsumerGroupKey.String(val)
-}
-
-// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
-// "messaging.kafka.message.key" semantic conventions. It represents the
-// message keys in Kafka, which are used for grouping alike messages to ensure
-// they're processed on the same partition. They differ from `messaging.message.id` in
-// that they're not unique. If the key is `null`, the attribute MUST NOT be
-// set.
-func MessagingKafkaMessageKey(val string) attribute.KeyValue {
- return MessagingKafkaMessageKeyKey.String(val)
-}
-
-// MessagingKafkaMessageOffset returns an attribute KeyValue conforming to
-// the "messaging.kafka.message.offset" semantic conventions. It represents the
-// offset of a record in the corresponding Kafka partition.
-func MessagingKafkaMessageOffset(val int) attribute.KeyValue {
- return MessagingKafkaMessageOffsetKey.Int(val)
-}
-
-// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming
-// to the "messaging.kafka.message.tombstone" semantic conventions. It
-// represents a boolean that is true if the message is a tombstone.
-func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
- return MessagingKafkaMessageTombstoneKey.Bool(val)
-}
-
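(Editorial aside, not part of this change: a hedged sketch combining the generic messaging.* attributes with the Kafka-specific ones above on a producer span; the tracer name, topic, key, and offset are assumptions, and the semconv import path/version is assumed.)

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed version of the vendored package
	"go.opentelemetry.io/otel/trace"
)

// startPublishSpan opens a producer span annotated with messaging.* and
// messaging.kafka.* attributes; all literal values are illustrative only.
func startPublishSpan(ctx context.Context) (context.Context, trace.Span) {
	tracer := otel.Tracer("example")
	return tracer.Start(ctx, "network-flows publish",
		trace.WithSpanKind(trace.SpanKindProducer),
		trace.WithAttributes(
			semconv.MessagingSystemKafka,
			semconv.MessagingOperationName("send"),
			semconv.MessagingDestinationName("network-flows"),
			semconv.MessagingKafkaMessageKey("node-1"),
			semconv.MessagingKafkaMessageOffset(42),
		),
	)
}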
-// This group describes attributes specific to RabbitMQ.
-const (
- // MessagingRabbitmqDestinationRoutingKeyKey is the attribute Key
- // conforming to the "messaging.rabbitmq.destination.routing_key" semantic
- // conventions. It represents the RabbitMQ message routing key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myKey'
- MessagingRabbitmqDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
-
- // MessagingRabbitmqMessageDeliveryTagKey is the attribute Key conforming
- // to the "messaging.rabbitmq.message.delivery_tag" semantic conventions.
- // It represents the RabbitMQ message delivery tag.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 123
- MessagingRabbitmqMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
-)
-
-// MessagingRabbitmqDestinationRoutingKey returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
-// conventions. It represents the RabbitMQ message routing key.
-func MessagingRabbitmqDestinationRoutingKey(val string) attribute.KeyValue {
- return MessagingRabbitmqDestinationRoutingKeyKey.String(val)
-}
-
-// MessagingRabbitmqMessageDeliveryTag returns an attribute KeyValue
-// conforming to the "messaging.rabbitmq.message.delivery_tag" semantic
-// conventions. It represents the RabbitMQ message delivery tag.
-func MessagingRabbitmqMessageDeliveryTag(val int) attribute.KeyValue {
- return MessagingRabbitmqMessageDeliveryTagKey.Int(val)
-}
-
-// This group describes attributes specific to RocketMQ.
-const (
- // MessagingRocketmqClientGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.client_group" semantic conventions. It represents
- // the name of the RocketMQ producer/consumer group that is handling the
- // message. The client type is identified by the SpanKind.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myConsumerGroup'
- MessagingRocketmqClientGroupKey = attribute.Key("messaging.rocketmq.client_group")
-
- // MessagingRocketmqConsumptionModelKey is the attribute Key conforming to
- // the "messaging.rocketmq.consumption_model" semantic conventions. It
- // represents the model of message consumption. This only applies to
- // consumer spans.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
-
- // MessagingRocketmqMessageDelayTimeLevelKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delay_time_level" semantic
- // conventions. It represents the delay time level for delay message, which
- // determines the message delay time.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3
- MessagingRocketmqMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
-
- // MessagingRocketmqMessageDeliveryTimestampKey is the attribute Key
- // conforming to the "messaging.rocketmq.message.delivery_timestamp"
- // semantic conventions. It represents the timestamp in milliseconds that
- // the delay message is expected to be delivered to consumer.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1665987217045
- MessagingRocketmqMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
-
- // MessagingRocketmqMessageGroupKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.group" semantic conventions. It represents
- // the message group, which is essential for FIFO messages. Messages that
- // belong to the same message group are always processed one by one within
- // the same consumer group.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myMessageGroup'
- MessagingRocketmqMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
-
- // MessagingRocketmqMessageKeysKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.keys" semantic conventions. It represents
- // the key(s) of the message, another way to mark a message besides its id.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'keyA', 'keyB'
- MessagingRocketmqMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
-
- // MessagingRocketmqMessageTagKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.tag" semantic conventions. It represents the
- // secondary classifier of message besides topic.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'tagA'
- MessagingRocketmqMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
-
- // MessagingRocketmqMessageTypeKey is the attribute Key conforming to the
- // "messaging.rocketmq.message.type" semantic conventions. It represents
- // the type of message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingRocketmqMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
-
- // MessagingRocketmqNamespaceKey is the attribute Key conforming to the
- // "messaging.rocketmq.namespace" semantic conventions. It represents the
- // namespace of RocketMQ resources; resources in different namespaces are
- // independent of each other.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myNamespace'
- MessagingRocketmqNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
-)
-
-var (
- // Clustering consumption model
- MessagingRocketmqConsumptionModelClustering = MessagingRocketmqConsumptionModelKey.String("clustering")
- // Broadcasting consumption model
- MessagingRocketmqConsumptionModelBroadcasting = MessagingRocketmqConsumptionModelKey.String("broadcasting")
-)
-
-var (
- // Normal message
- MessagingRocketmqMessageTypeNormal = MessagingRocketmqMessageTypeKey.String("normal")
- // FIFO message
- MessagingRocketmqMessageTypeFifo = MessagingRocketmqMessageTypeKey.String("fifo")
- // Delay message
- MessagingRocketmqMessageTypeDelay = MessagingRocketmqMessageTypeKey.String("delay")
- // Transaction message
- MessagingRocketmqMessageTypeTransaction = MessagingRocketmqMessageTypeKey.String("transaction")
-)
-
-// MessagingRocketmqClientGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.client_group" semantic conventions. It represents
-// the name of the RocketMQ producer/consumer group that is handling the
-// message. The client type is identified by the SpanKind.
-func MessagingRocketmqClientGroup(val string) attribute.KeyValue {
- return MessagingRocketmqClientGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageDelayTimeLevel returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
-// conventions. It represents the delay time level for delay message, which
-// determines the message delay time.
-func MessagingRocketmqMessageDelayTimeLevel(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDelayTimeLevelKey.Int(val)
-}
-
-// MessagingRocketmqMessageDeliveryTimestamp returns an attribute KeyValue
-// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
-// conventions. It represents the timestamp in milliseconds that the delay
-// message is expected to be delivered to consumer.
-func MessagingRocketmqMessageDeliveryTimestamp(val int) attribute.KeyValue {
- return MessagingRocketmqMessageDeliveryTimestampKey.Int(val)
-}
-
-// MessagingRocketmqMessageGroup returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.group" semantic conventions. It represents
-// the message group, which is essential for FIFO messages. Messages that
-// belong to the same message group are always processed one by one within
-// the same consumer group.
-func MessagingRocketmqMessageGroup(val string) attribute.KeyValue {
- return MessagingRocketmqMessageGroupKey.String(val)
-}
-
-// MessagingRocketmqMessageKeys returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.keys" semantic conventions. It represents
-// the key(s) of the message, another way to mark a message besides its id.
-func MessagingRocketmqMessageKeys(val ...string) attribute.KeyValue {
- return MessagingRocketmqMessageKeysKey.StringSlice(val)
-}
-
-// MessagingRocketmqMessageTag returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.message.tag" semantic conventions. It represents the
-// secondary classifier of message besides topic.
-func MessagingRocketmqMessageTag(val string) attribute.KeyValue {
- return MessagingRocketmqMessageTagKey.String(val)
-}
-
-// MessagingRocketmqNamespace returns an attribute KeyValue conforming to
-// the "messaging.rocketmq.namespace" semantic conventions. It represents the
-// namespace of RocketMQ resources; resources in different namespaces are
-// independent of each other.
-func MessagingRocketmqNamespace(val string) attribute.KeyValue {
- return MessagingRocketmqNamespaceKey.String(val)
-}
-
-// This group describes attributes specific to GCP Pub/Sub.
-const (
- // MessagingGCPPubsubMessageAckDeadlineKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions.
- // It represents the ack deadline in seconds set for the modify ack
- // deadline request.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 10
- MessagingGCPPubsubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
-
- // MessagingGCPPubsubMessageAckIDKey is the attribute Key conforming to the
- // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
- // represents the ack id for a given message.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ack_id'
- MessagingGCPPubsubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
-
- // MessagingGCPPubsubMessageDeliveryAttemptKey is the attribute Key
- // conforming to the "messaging.gcp_pubsub.message.delivery_attempt"
- // semantic conventions. It represents the delivery attempt for a given
- // message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingGCPPubsubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
-
- // MessagingGCPPubsubMessageOrderingKeyKey is the attribute Key conforming
- // to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions.
- // It represents the ordering key for a given message. If the attribute is
- // not present, the message does not have an ordering key.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ordering_key'
- MessagingGCPPubsubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
-)
-
-// MessagingGCPPubsubMessageAckDeadline returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ack_deadline" semantic
-// conventions. It represents the ack deadline in seconds set for the modify
-// ack deadline request.
-func MessagingGCPPubsubMessageAckDeadline(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckDeadlineKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageAckID returns an attribute KeyValue conforming
-// to the "messaging.gcp_pubsub.message.ack_id" semantic conventions. It
-// represents the ack id for a given message.
-func MessagingGCPPubsubMessageAckID(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageAckIDKey.String(val)
-}
-
-// MessagingGCPPubsubMessageDeliveryAttempt returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
-// conventions. It represents the delivery attempt for a given message.
-func MessagingGCPPubsubMessageDeliveryAttempt(val int) attribute.KeyValue {
- return MessagingGCPPubsubMessageDeliveryAttemptKey.Int(val)
-}
-
-// MessagingGCPPubsubMessageOrderingKey returns an attribute KeyValue
-// conforming to the "messaging.gcp_pubsub.message.ordering_key" semantic
-// conventions. It represents the ordering key for a given message. If the
-// attribute is not present, the message does not have an ordering key.
-func MessagingGCPPubsubMessageOrderingKey(val string) attribute.KeyValue {
- return MessagingGCPPubsubMessageOrderingKeyKey.String(val)
-}
-
-// This group describes attributes specific to Azure Service Bus.
-const (
- // MessagingServicebusDestinationSubscriptionNameKey is the attribute Key
- // conforming to the "messaging.servicebus.destination.subscription_name"
- // semantic conventions. It represents the name of the subscription in the
- // topic messages are received from.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'mySubscription'
- MessagingServicebusDestinationSubscriptionNameKey = attribute.Key("messaging.servicebus.destination.subscription_name")
-
- // MessagingServicebusDispositionStatusKey is the attribute Key conforming
- // to the "messaging.servicebus.disposition_status" semantic conventions.
- // It represents the [settlement
- // type](https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- MessagingServicebusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
-
- // MessagingServicebusMessageDeliveryCountKey is the attribute Key
- // conforming to the "messaging.servicebus.message.delivery_count" semantic
- // conventions. It represents the number of deliveries that have been
- // attempted for this message.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 2
- MessagingServicebusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
-
- // MessagingServicebusMessageEnqueuedTimeKey is the attribute Key
- // conforming to the "messaging.servicebus.message.enqueued_time" semantic
- // conventions. It represents the UTC epoch seconds at which the message
- // has been accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingServicebusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
-)
-
-var (
- // Message is completed
- MessagingServicebusDispositionStatusComplete = MessagingServicebusDispositionStatusKey.String("complete")
- // Message is abandoned
- MessagingServicebusDispositionStatusAbandon = MessagingServicebusDispositionStatusKey.String("abandon")
- // Message is sent to dead letter queue
- MessagingServicebusDispositionStatusDeadLetter = MessagingServicebusDispositionStatusKey.String("dead_letter")
- // Message is deferred
- MessagingServicebusDispositionStatusDefer = MessagingServicebusDispositionStatusKey.String("defer")
-)
-
-// MessagingServicebusDestinationSubscriptionName returns an attribute
-// KeyValue conforming to the
-// "messaging.servicebus.destination.subscription_name" semantic conventions.
-// It represents the name of the subscription in the topic messages are
-// received from.
-func MessagingServicebusDestinationSubscriptionName(val string) attribute.KeyValue {
- return MessagingServicebusDestinationSubscriptionNameKey.String(val)
-}
-
-// MessagingServicebusMessageDeliveryCount returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.delivery_count" semantic
-// conventions. It represents the number of deliveries that have been attempted
-// for this message.
-func MessagingServicebusMessageDeliveryCount(val int) attribute.KeyValue {
- return MessagingServicebusMessageDeliveryCountKey.Int(val)
-}
-
-// MessagingServicebusMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.servicebus.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingServicebusMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingServicebusMessageEnqueuedTimeKey.Int(val)
-}
-
-// This group describes attributes specific to Azure Event Hubs.
-const (
- // MessagingEventhubsConsumerGroupKey is the attribute Key conforming to
- // the "messaging.eventhubs.consumer.group" semantic conventions. It
- // represents the name of the consumer group the event consumer is
- // associated with.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'indexer'
- MessagingEventhubsConsumerGroupKey = attribute.Key("messaging.eventhubs.consumer.group")
-
- // MessagingEventhubsMessageEnqueuedTimeKey is the attribute Key conforming
- // to the "messaging.eventhubs.message.enqueued_time" semantic conventions.
- // It represents the UTC epoch seconds at which the message has been
- // accepted and stored in the entity.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1701393730
- MessagingEventhubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
-)
-
-// MessagingEventhubsConsumerGroup returns an attribute KeyValue conforming
-// to the "messaging.eventhubs.consumer.group" semantic conventions. It
-// represents the name of the consumer group the event consumer is associated
-// with.
-func MessagingEventhubsConsumerGroup(val string) attribute.KeyValue {
- return MessagingEventhubsConsumerGroupKey.String(val)
-}
-
-// MessagingEventhubsMessageEnqueuedTime returns an attribute KeyValue
-// conforming to the "messaging.eventhubs.message.enqueued_time" semantic
-// conventions. It represents the UTC epoch seconds at which the message has
-// been accepted and stored in the entity.
-func MessagingEventhubsMessageEnqueuedTime(val int) attribute.KeyValue {
- return MessagingEventhubsMessageEnqueuedTimeKey.Int(val)
-}
-
-// These attributes may be used for any network related operation.
-const (
- // NetworkCarrierIccKey is the attribute Key conforming to the
- // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
- // alpha-2 2-character country code associated with the mobile carrier
- // network.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'DE'
- NetworkCarrierIccKey = attribute.Key("network.carrier.icc")
-
- // NetworkCarrierMccKey is the attribute Key conforming to the
- // "network.carrier.mcc" semantic conventions. It represents the mobile
- // carrier country code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '310'
- NetworkCarrierMccKey = attribute.Key("network.carrier.mcc")
-
- // NetworkCarrierMncKey is the attribute Key conforming to the
- // "network.carrier.mnc" semantic conventions. It represents the mobile
- // carrier network code.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '001'
- NetworkCarrierMncKey = attribute.Key("network.carrier.mnc")
-
- // NetworkCarrierNameKey is the attribute Key conforming to the
- // "network.carrier.name" semantic conventions. It represents the name of
- // the mobile carrier.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'sprint'
- NetworkCarrierNameKey = attribute.Key("network.carrier.name")
-
- // NetworkConnectionSubtypeKey is the attribute Key conforming to the
-	// "network.connection.subtype" semantic conventions. It describes more
-	// details regarding the connection.type. It may be the type of cell
-	// technology connection, but it could be used for describing details
-	// about a wifi connection.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'LTE'
- NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
-
- // NetworkConnectionTypeKey is the attribute Key conforming to the
- // "network.connection.type" semantic conventions. It represents the
- // internet connection type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'wifi'
- NetworkConnectionTypeKey = attribute.Key("network.connection.type")
-
- // NetworkIoDirectionKey is the attribute Key conforming to the
- // "network.io.direction" semantic conventions. It represents the network
- // IO operation direction.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'transmit'
- NetworkIoDirectionKey = attribute.Key("network.io.direction")
-
- // NetworkLocalAddressKey is the attribute Key conforming to the
- // "network.local.address" semantic conventions. It represents the local
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkLocalAddressKey = attribute.Key("network.local.address")
-
- // NetworkLocalPortKey is the attribute Key conforming to the
- // "network.local.port" semantic conventions. It represents the local port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkLocalPortKey = attribute.Key("network.local.port")
-
- // NetworkPeerAddressKey is the attribute Key conforming to the
- // "network.peer.address" semantic conventions. It represents the peer
- // address of the network connection - IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '10.1.2.80', '/tmp/my.sock'
- NetworkPeerAddressKey = attribute.Key("network.peer.address")
-
- // NetworkPeerPortKey is the attribute Key conforming to the
- // "network.peer.port" semantic conventions. It represents the peer port
- // number of the network connection.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 65123
- NetworkPeerPortKey = attribute.Key("network.peer.port")
-
- // NetworkProtocolNameKey is the attribute Key conforming to the
- // "network.protocol.name" semantic conventions. It represents the [OSI
- // application layer](https://osi-model.com/application-layer/) or non-OSI
- // equivalent.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'amqp', 'http', 'mqtt'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkProtocolNameKey = attribute.Key("network.protocol.name")
-
- // NetworkProtocolVersionKey is the attribute Key conforming to the
- // "network.protocol.version" semantic conventions. It represents the
- // actual version of the protocol used for network communication.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.1', '2'
- // Note: If protocol version is subject to negotiation (for example using
- // [ALPN](https://www.rfc-editor.org/rfc/rfc7301.html)), this attribute
- // SHOULD be set to the negotiated version. If the actual protocol version
- // is not known, this attribute SHOULD NOT be set.
- NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
-
- // NetworkTransportKey is the attribute Key conforming to the
- // "network.transport" semantic conventions. It represents the [OSI
- // transport layer](https://osi-model.com/transport-layer/) or
- // [inter-process communication
- // method](https://wikipedia.org/wiki/Inter-process_communication).
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'tcp', 'udp'
- // Note: The value SHOULD be normalized to lowercase.
- //
- // Consider always setting the transport when setting a port number, since
- // a port number is ambiguous without knowing the transport. For example
- // different processes could be listening on TCP port 12345 and UDP port
- // 12345.
- NetworkTransportKey = attribute.Key("network.transport")
-
- // NetworkTypeKey is the attribute Key conforming to the "network.type"
- // semantic conventions. It represents the [OSI network
- // layer](https://osi-model.com/network-layer/) or non-OSI equivalent.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'ipv4', 'ipv6'
- // Note: The value SHOULD be normalized to lowercase.
- NetworkTypeKey = attribute.Key("network.type")
-)
-
-var (
- // GPRS
- NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
- // EDGE
- NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
- // UMTS
- NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
- // CDMA
- NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
- // EVDO Rel. 0
- NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
- // EVDO Rev. A
- NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
- // CDMA2000 1XRTT
- NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
- // HSDPA
- NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
- // HSUPA
- NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
- // HSPA
- NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
- // IDEN
- NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
- // EVDO Rev. B
- NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
- // LTE
- NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
- // EHRPD
- NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
- // HSPAP
- NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
- // GSM
- NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
- // TD-SCDMA
- NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
- // IWLAN
- NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
- // 5G NR (New Radio)
- NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
- // 5G NRNSA (New Radio Non-Standalone)
- NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
- // LTE CA
- NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
-)
-
-var (
- // wifi
- NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
- // wired
- NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
- // cell
- NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
- // unavailable
- NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
- // unknown
- NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
-)
-
-var (
- // transmit
- NetworkIoDirectionTransmit = NetworkIoDirectionKey.String("transmit")
- // receive
- NetworkIoDirectionReceive = NetworkIoDirectionKey.String("receive")
-)
-
-var (
- // TCP
- NetworkTransportTCP = NetworkTransportKey.String("tcp")
- // UDP
- NetworkTransportUDP = NetworkTransportKey.String("udp")
- // Named or anonymous pipe
- NetworkTransportPipe = NetworkTransportKey.String("pipe")
- // Unix domain socket
- NetworkTransportUnix = NetworkTransportKey.String("unix")
-)
-
-var (
- // IPv4
- NetworkTypeIpv4 = NetworkTypeKey.String("ipv4")
- // IPv6
- NetworkTypeIpv6 = NetworkTypeKey.String("ipv6")
-)
-
-// NetworkCarrierIcc returns an attribute KeyValue conforming to the
-// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
-// alpha-2 2-character country code associated with the mobile carrier network.
-func NetworkCarrierIcc(val string) attribute.KeyValue {
- return NetworkCarrierIccKey.String(val)
-}
-
-// NetworkCarrierMcc returns an attribute KeyValue conforming to the
-// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
-// country code.
-func NetworkCarrierMcc(val string) attribute.KeyValue {
- return NetworkCarrierMccKey.String(val)
-}
-
-// NetworkCarrierMnc returns an attribute KeyValue conforming to the
-// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
-// network code.
-func NetworkCarrierMnc(val string) attribute.KeyValue {
- return NetworkCarrierMncKey.String(val)
-}
-
-// NetworkCarrierName returns an attribute KeyValue conforming to the
-// "network.carrier.name" semantic conventions. It represents the name of the
-// mobile carrier.
-func NetworkCarrierName(val string) attribute.KeyValue {
- return NetworkCarrierNameKey.String(val)
-}
-
-// NetworkLocalAddress returns an attribute KeyValue conforming to the
-// "network.local.address" semantic conventions. It represents the local
-// address of the network connection - IP address or Unix domain socket name.
-func NetworkLocalAddress(val string) attribute.KeyValue {
- return NetworkLocalAddressKey.String(val)
-}
-
-// NetworkLocalPort returns an attribute KeyValue conforming to the
-// "network.local.port" semantic conventions. It represents the local port
-// number of the network connection.
-func NetworkLocalPort(val int) attribute.KeyValue {
- return NetworkLocalPortKey.Int(val)
-}
-
-// NetworkPeerAddress returns an attribute KeyValue conforming to the
-// "network.peer.address" semantic conventions. It represents the peer address
-// of the network connection - IP address or Unix domain socket name.
-func NetworkPeerAddress(val string) attribute.KeyValue {
- return NetworkPeerAddressKey.String(val)
-}
-
-// NetworkPeerPort returns an attribute KeyValue conforming to the
-// "network.peer.port" semantic conventions. It represents the peer port number
-// of the network connection.
-func NetworkPeerPort(val int) attribute.KeyValue {
- return NetworkPeerPortKey.Int(val)
-}
-
-// NetworkProtocolName returns an attribute KeyValue conforming to the
-// "network.protocol.name" semantic conventions. It represents the [OSI
-// application layer](https://osi-model.com/application-layer/) or non-OSI
-// equivalent.
-func NetworkProtocolName(val string) attribute.KeyValue {
- return NetworkProtocolNameKey.String(val)
-}
-
-// NetworkProtocolVersion returns an attribute KeyValue conforming to the
-// "network.protocol.version" semantic conventions. It represents the actual
-// version of the protocol used for network communication.
-func NetworkProtocolVersion(val string) attribute.KeyValue {
- return NetworkProtocolVersionKey.String(val)
-}
-
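For context, the network helpers above all return `attribute.KeyValue` values meant to be set on spans or metrics. A minimal, hypothetical usage sketch follows, assuming the standard `go.opentelemetry.io/otel` tracing API and that this vendored package is imported as `semconv` (the import path version, tracer name, and `annotateConnection` are illustrative only):

	import (
		"context"

		"go.opentelemetry.io/otel"
		semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed vendored version
	)

	// annotateConnection records the peer address, peer port, and transport on a span.
	func annotateConnection(ctx context.Context) {
		_, span := otel.Tracer("example").Start(ctx, "connect")
		defer span.End()
		span.SetAttributes(
			semconv.NetworkPeerAddress("10.1.2.80"),
			semconv.NetworkPeerPort(65123),
			semconv.NetworkTransportTCP,
		)
	}
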
-// An OCI image manifest.
-const (
- // OciManifestDigestKey is the attribute Key conforming to the
- // "oci.manifest.digest" semantic conventions. It represents the digest of
-	// the OCI image manifest. For container images specifically, this is the
-	// digest by which the container image is known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // 'sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4'
- // Note: Follows [OCI Image Manifest
- // Specification](https://github.com/opencontainers/image-spec/blob/main/manifest.md),
- // and specifically the [Digest
- // property](https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests).
- // An example can be found in [Example Image
- // Manifest](https://docs.docker.com/registry/spec/manifest-v2-2/#example-image-manifest).
- OciManifestDigestKey = attribute.Key("oci.manifest.digest")
-)
-
-// OciManifestDigest returns an attribute KeyValue conforming to the
-// "oci.manifest.digest" semantic conventions. It represents the digest of the
-// OCI image manifest. For container images specifically, this is the digest by
-// which the container image is known.
-func OciManifestDigest(val string) attribute.KeyValue {
- return OciManifestDigestKey.String(val)
-}
-
-// Attributes used by the OpenTracing Shim layer.
-const (
- // OpentracingRefTypeKey is the attribute Key conforming to the
- // "opentracing.ref_type" semantic conventions. It represents the
- // parent-child Reference type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: The causal relationship between a child Span and a parent Span.
- OpentracingRefTypeKey = attribute.Key("opentracing.ref_type")
-)
-
-var (
- // The parent Span depends on the child Span in some capacity
- OpentracingRefTypeChildOf = OpentracingRefTypeKey.String("child_of")
- // The parent Span doesn't depend in any way on the result of the child Span
- OpentracingRefTypeFollowsFrom = OpentracingRefTypeKey.String("follows_from")
-)
-
-// The operating system (OS) on which the process represented by this resource
-// is running.
-const (
- // OSBuildIDKey is the attribute Key conforming to the "os.build_id"
- // semantic conventions. It represents the unique identifier for a
- // particular build or compilation of the operating system.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TQ3C.230805.001.B2', '20E247', '22621'
- OSBuildIDKey = attribute.Key("os.build_id")
-
- // OSDescriptionKey is the attribute Key conforming to the "os.description"
- // semantic conventions. It represents the human readable (not intended to
-	// be parsed) OS version information, for example as reported by the `ver`
-	// or `lsb_release -a` commands.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1
- // LTS'
- OSDescriptionKey = attribute.Key("os.description")
-
- // OSNameKey is the attribute Key conforming to the "os.name" semantic
- // conventions. It represents the human readable operating system name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'iOS', 'Android', 'Ubuntu'
- OSNameKey = attribute.Key("os.name")
-
- // OSTypeKey is the attribute Key conforming to the "os.type" semantic
- // conventions. It represents the operating system type.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- OSTypeKey = attribute.Key("os.type")
-
- // OSVersionKey is the attribute Key conforming to the "os.version"
- // semantic conventions. It represents the version string of the operating
- // system as defined in [Version
- // Attributes](/docs/resource/README.md#version-attributes).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.2.1', '18.04.1'
- OSVersionKey = attribute.Key("os.version")
-)
-
-var (
- // Microsoft Windows
- OSTypeWindows = OSTypeKey.String("windows")
- // Linux
- OSTypeLinux = OSTypeKey.String("linux")
- // Apple Darwin
- OSTypeDarwin = OSTypeKey.String("darwin")
- // FreeBSD
- OSTypeFreeBSD = OSTypeKey.String("freebsd")
- // NetBSD
- OSTypeNetBSD = OSTypeKey.String("netbsd")
- // OpenBSD
- OSTypeOpenBSD = OSTypeKey.String("openbsd")
- // DragonFly BSD
- OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
- // HP-UX (Hewlett Packard Unix)
- OSTypeHPUX = OSTypeKey.String("hpux")
- // AIX (Advanced Interactive eXecutive)
- OSTypeAIX = OSTypeKey.String("aix")
- // SunOS, Oracle Solaris
- OSTypeSolaris = OSTypeKey.String("solaris")
- // IBM z/OS
- OSTypeZOS = OSTypeKey.String("z_os")
-)
-
-// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
-// semantic conventions. It represents the unique identifier for a particular
-// build or compilation of the operating system.
-func OSBuildID(val string) attribute.KeyValue {
- return OSBuildIDKey.String(val)
-}
-
-// OSDescription returns an attribute KeyValue conforming to the
-// "os.description" semantic conventions. It represents the human readable (not
-// intended to be parsed) OS version information, for example as reported by
-// the `ver` or `lsb_release -a` commands.
-func OSDescription(val string) attribute.KeyValue {
- return OSDescriptionKey.String(val)
-}
-
-// OSName returns an attribute KeyValue conforming to the "os.name" semantic
-// conventions. It represents the human readable operating system name.
-func OSName(val string) attribute.KeyValue {
- return OSNameKey.String(val)
-}
-
-// OSVersion returns an attribute KeyValue conforming to the "os.version"
-// semantic conventions. It represents the version string of the operating
-// system as defined in [Version
-// Attributes](/docs/resource/README.md#version-attributes).
-func OSVersion(val string) attribute.KeyValue {
- return OSVersionKey.String(val)
-}
-
-// Attributes reserved for OpenTelemetry
-const (
- // OTelStatusCodeKey is the attribute Key conforming to the
- // "otel.status_code" semantic conventions. It represents the name of the
- // code, either "OK" or "ERROR". MUST NOT be set if the status code is
- // UNSET.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- OTelStatusCodeKey = attribute.Key("otel.status_code")
-
- // OTelStatusDescriptionKey is the attribute Key conforming to the
- // "otel.status_description" semantic conventions. It represents the
- // description of the Status if it has a value, otherwise not set.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'resource not found'
- OTelStatusDescriptionKey = attribute.Key("otel.status_description")
-)
-
-var (
- // The operation has been validated by an Application developer or Operator to have completed successfully
- OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
- // The operation contains an error
- OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
-)
-
-// OTelStatusDescription returns an attribute KeyValue conforming to the
-// "otel.status_description" semantic conventions. It represents the
-// description of the Status if it has a value, otherwise not set.
-func OTelStatusDescription(val string) attribute.KeyValue {
- return OTelStatusDescriptionKey.String(val)
-}
-
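The two `otel.status_*` attributes above are typically populated by non-OTLP exporters from the span status rather than set directly by instrumentation. A hedged sketch of the instrumentation side, using the standard `go.opentelemetry.io/otel` and `codes` packages (the tracer name, function name, and error value are placeholders):

	import (
		"context"
		"errors"

		"go.opentelemetry.io/otel"
		"go.opentelemetry.io/otel/codes"
	)

	func lookup(ctx context.Context) error {
		_, span := otel.Tracer("example").Start(ctx, "lookup")
		defer span.End()
		err := errors.New("resource not found") // placeholder failure
		span.RecordError(err)
		// Exporters that cannot carry a structured status may translate this
		// into otel.status_code=ERROR plus otel.status_description.
		span.SetStatus(codes.Error, err.Error())
		return err
	}
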
-// Attributes used by non-OTLP exporters to represent OpenTelemetry Scope's
-// concepts.
-const (
- // OTelScopeNameKey is the attribute Key conforming to the
- // "otel.scope.name" semantic conventions. It represents the name of the
- // instrumentation scope - (`InstrumentationScope.Name` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'io.opentelemetry.contrib.mongodb'
- OTelScopeNameKey = attribute.Key("otel.scope.name")
-
- // OTelScopeVersionKey is the attribute Key conforming to the
- // "otel.scope.version" semantic conventions. It represents the version of
- // the instrumentation scope - (`InstrumentationScope.Version` in OTLP).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '1.0.0'
- OTelScopeVersionKey = attribute.Key("otel.scope.version")
-)
-
-// OTelScopeName returns an attribute KeyValue conforming to the
-// "otel.scope.name" semantic conventions. It represents the name of the
-// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
-func OTelScopeName(val string) attribute.KeyValue {
- return OTelScopeNameKey.String(val)
-}
-
-// OTelScopeVersion returns an attribute KeyValue conforming to the
-// "otel.scope.version" semantic conventions. It represents the version of the
-// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
-func OTelScopeVersion(val string) attribute.KeyValue {
- return OTelScopeVersionKey.String(val)
-}
-
-// Operations that access some remote service.
-const (
- // PeerServiceKey is the attribute Key conforming to the "peer.service"
- // semantic conventions. It represents the
- // [`service.name`](/docs/resource/README.md#service) of the remote
- // service. SHOULD be equal to the actual `service.name` resource attribute
- // of the remote service if any.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'AuthTokenCache'
- PeerServiceKey = attribute.Key("peer.service")
-)
-
-// PeerService returns an attribute KeyValue conforming to the
-// "peer.service" semantic conventions. It represents the
-// [`service.name`](/docs/resource/README.md#service) of the remote service.
-// SHOULD be equal to the actual `service.name` resource attribute of the
-// remote service if any.
-func PeerService(val string) attribute.KeyValue {
- return PeerServiceKey.String(val)
-}
-
-// An operating system process.
-const (
- // ProcessCommandKey is the attribute Key conforming to the
- // "process.command" semantic conventions. It represents the command used
- // to launch the process (i.e. the command name). On Linux based systems,
- // can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can
- // be set to the first parameter extracted from `GetCommandLineW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otelcol'
- ProcessCommandKey = attribute.Key("process.command")
-
- // ProcessCommandArgsKey is the attribute Key conforming to the
-	// "process.command_args" semantic conventions. It represents all the
- // command arguments (including the command/executable itself) as received
- // by the process. On Linux-based systems (and some other Unixoid systems
- // supporting procfs), can be set according to the list of null-delimited
- // strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
- // this would be the full argv vector passed to `main`.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'cmd/otecol', '--config=config.yaml'
- ProcessCommandArgsKey = attribute.Key("process.command_args")
-
- // ProcessCommandLineKey is the attribute Key conforming to the
- // "process.command_line" semantic conventions. It represents the full
- // command used to launch the process as a single string representing the
- // full command. On Windows, can be set to the result of `GetCommandLineW`.
- // Do not set this if you have to assemble it just for monitoring; use
- // `process.command_args` instead.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"'
- ProcessCommandLineKey = attribute.Key("process.command_line")
-
- // ProcessContextSwitchTypeKey is the attribute Key conforming to the
-	// "process.context_switch_type" semantic conventions. It specifies whether
-	// the context switches for this data point were voluntary or involuntary.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
-
- // ProcessCreationTimeKey is the attribute Key conforming to the
- // "process.creation.time" semantic conventions. It represents the date and
- // time the process was created, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:25:34.853Z'
- ProcessCreationTimeKey = attribute.Key("process.creation.time")
-
- // ProcessExecutableNameKey is the attribute Key conforming to the
- // "process.executable.name" semantic conventions. It represents the name
- // of the process executable. On Linux based systems, can be set to the
- // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name
- // of `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'otelcol'
- ProcessExecutableNameKey = attribute.Key("process.executable.name")
-
- // ProcessExecutablePathKey is the attribute Key conforming to the
- // "process.executable.path" semantic conventions. It represents the full
- // path to the process executable. On Linux based systems, can be set to
- // the target of `proc/[pid]/exe`. On Windows, can be set to the result of
- // `GetProcessImageFileNameW`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/usr/bin/cmd/otelcol'
- ProcessExecutablePathKey = attribute.Key("process.executable.path")
-
- // ProcessExitCodeKey is the attribute Key conforming to the
- // "process.exit.code" semantic conventions. It represents the exit code of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 127
- ProcessExitCodeKey = attribute.Key("process.exit.code")
-
- // ProcessExitTimeKey is the attribute Key conforming to the
- // "process.exit.time" semantic conventions. It represents the date and
- // time the process exited, in ISO 8601 format.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2023-11-21T09:26:12.315Z'
- ProcessExitTimeKey = attribute.Key("process.exit.time")
-
- // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
- // "process.group_leader.pid" semantic conventions. It represents the PID
- // of the process's group leader. This is also the process group ID (PGID)
- // of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 23
- ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
-
- // ProcessInteractiveKey is the attribute Key conforming to the
-	// "process.interactive" semantic conventions. It represents whether the
-	// process is connected to an interactive shell.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessInteractiveKey = attribute.Key("process.interactive")
-
- // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
- // semantic conventions. It represents the username of the user that owns
- // the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessOwnerKey = attribute.Key("process.owner")
-
- // ProcessPagingFaultTypeKey is the attribute Key conforming to the
- // "process.paging.fault_type" semantic conventions. It represents the type
- // of page fault for this data point. Type `major` is for major/hard page
- // faults, and `minor` is for minor/soft page faults.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
-
- // ProcessParentPIDKey is the attribute Key conforming to the
- // "process.parent_pid" semantic conventions. It represents the parent
- // Process identifier (PPID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 111
- ProcessParentPIDKey = attribute.Key("process.parent_pid")
-
- // ProcessPIDKey is the attribute Key conforming to the "process.pid"
- // semantic conventions. It represents the process identifier (PID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1234
- ProcessPIDKey = attribute.Key("process.pid")
-
- // ProcessRealUserIDKey is the attribute Key conforming to the
- // "process.real_user.id" semantic conventions. It represents the real user
- // ID (RUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1000
- ProcessRealUserIDKey = attribute.Key("process.real_user.id")
-
- // ProcessRealUserNameKey is the attribute Key conforming to the
- // "process.real_user.name" semantic conventions. It represents the
- // username of the real user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessRealUserNameKey = attribute.Key("process.real_user.name")
-
- // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
- // "process.runtime.description" semantic conventions. It represents an
- // additional description about the runtime of the process, for example a
- // specific vendor customization of the runtime environment.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0'
- ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
-
- // ProcessRuntimeNameKey is the attribute Key conforming to the
- // "process.runtime.name" semantic conventions. It represents the name of
- // the runtime of this process. For compiled native binaries, this SHOULD
- // be the name of the compiler.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'OpenJDK Runtime Environment'
- ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
-
- // ProcessRuntimeVersionKey is the attribute Key conforming to the
- // "process.runtime.version" semantic conventions. It represents the
- // version of the runtime of this process, as returned by the runtime
- // without modification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.0.2'
- ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
-
- // ProcessSavedUserIDKey is the attribute Key conforming to the
- // "process.saved_user.id" semantic conventions. It represents the saved
- // user ID (SUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1002
- ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
-
- // ProcessSavedUserNameKey is the attribute Key conforming to the
- // "process.saved_user.name" semantic conventions. It represents the
- // username of the saved user.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'operator'
- ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
-
- // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
- // "process.session_leader.pid" semantic conventions. It represents the PID
- // of the process's session leader. This is also the session ID (SID) of
- // the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 14
- ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
-
- // ProcessUserIDKey is the attribute Key conforming to the
- // "process.user.id" semantic conventions. It represents the effective user
- // ID (EUID) of the process.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1001
- ProcessUserIDKey = attribute.Key("process.user.id")
-
- // ProcessUserNameKey is the attribute Key conforming to the
- // "process.user.name" semantic conventions. It represents the username of
- // the effective user of the process.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'root'
- ProcessUserNameKey = attribute.Key("process.user.name")
-
- // ProcessVpidKey is the attribute Key conforming to the "process.vpid"
- // semantic conventions. It represents the virtual process identifier.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 12
- // Note: The process ID within a PID namespace. This is not necessarily
- // unique across all processes on the host but it is unique within the
- // process namespace that the process exists within.
- ProcessVpidKey = attribute.Key("process.vpid")
-)
-
-var (
- // voluntary
- ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
- // involuntary
- ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
-)
-
-var (
- // major
- ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
- // minor
- ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
-)
-
-// ProcessCommand returns an attribute KeyValue conforming to the
-// "process.command" semantic conventions. It represents the command used to
-// launch the process (i.e. the command name). On Linux based systems, can be
-// set to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to
-// the first parameter extracted from `GetCommandLineW`.
-func ProcessCommand(val string) attribute.KeyValue {
- return ProcessCommandKey.String(val)
-}
-
-// ProcessCommandArgs returns an attribute KeyValue conforming to the
-// "process.command_args" semantic conventions. It represents all the
-// command arguments (including the command/executable itself) as received by
-// the process. On Linux-based systems (and some other Unixoid systems
-// supporting procfs), can be set according to the list of null-delimited
-// strings extracted from `proc/[pid]/cmdline`. For libc-based executables,
-// this would be the full argv vector passed to `main`.
-func ProcessCommandArgs(val ...string) attribute.KeyValue {
- return ProcessCommandArgsKey.StringSlice(val)
-}
-
-// ProcessCommandLine returns an attribute KeyValue conforming to the
-// "process.command_line" semantic conventions. It represents the full command
-// used to launch the process as a single string representing the full command.
-// On Windows, can be set to the result of `GetCommandLineW`. Do not set this
-// if you have to assemble it just for monitoring; use `process.command_args`
-// instead.
-func ProcessCommandLine(val string) attribute.KeyValue {
- return ProcessCommandLineKey.String(val)
-}
-
-// ProcessCreationTime returns an attribute KeyValue conforming to the
-// "process.creation.time" semantic conventions. It represents the date and
-// time the process was created, in ISO 8601 format.
-func ProcessCreationTime(val string) attribute.KeyValue {
- return ProcessCreationTimeKey.String(val)
-}
-
-// ProcessExecutableName returns an attribute KeyValue conforming to the
-// "process.executable.name" semantic conventions. It represents the name of
-// the process executable. On Linux based systems, can be set to the `Name` in
-// `proc/[pid]/status`. On Windows, can be set to the base name of
-// `GetProcessImageFileNameW`.
-func ProcessExecutableName(val string) attribute.KeyValue {
- return ProcessExecutableNameKey.String(val)
-}
-
-// ProcessExecutablePath returns an attribute KeyValue conforming to the
-// "process.executable.path" semantic conventions. It represents the full path
-// to the process executable. On Linux based systems, can be set to the target
-// of `proc/[pid]/exe`. On Windows, can be set to the result of
-// `GetProcessImageFileNameW`.
-func ProcessExecutablePath(val string) attribute.KeyValue {
- return ProcessExecutablePathKey.String(val)
-}
-
-// ProcessExitCode returns an attribute KeyValue conforming to the
-// "process.exit.code" semantic conventions. It represents the exit code of the
-// process.
-func ProcessExitCode(val int) attribute.KeyValue {
- return ProcessExitCodeKey.Int(val)
-}
-
-// ProcessExitTime returns an attribute KeyValue conforming to the
-// "process.exit.time" semantic conventions. It represents the date and time
-// the process exited, in ISO 8601 format.
-func ProcessExitTime(val string) attribute.KeyValue {
- return ProcessExitTimeKey.String(val)
-}
-
-// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
-// "process.group_leader.pid" semantic conventions. It represents the PID of
-// the process's group leader. This is also the process group ID (PGID) of the
-// process.
-func ProcessGroupLeaderPID(val int) attribute.KeyValue {
- return ProcessGroupLeaderPIDKey.Int(val)
-}
-
-// ProcessInteractive returns an attribute KeyValue conforming to the
-// "process.interactive" semantic conventions. It represents whether the
-// process is connected to an interactive shell.
-func ProcessInteractive(val bool) attribute.KeyValue {
- return ProcessInteractiveKey.Bool(val)
-}
-
-// ProcessOwner returns an attribute KeyValue conforming to the
-// "process.owner" semantic conventions. It represents the username of the user
-// that owns the process.
-func ProcessOwner(val string) attribute.KeyValue {
- return ProcessOwnerKey.String(val)
-}
-
-// ProcessParentPID returns an attribute KeyValue conforming to the
-// "process.parent_pid" semantic conventions. It represents the parent Process
-// identifier (PPID).
-func ProcessParentPID(val int) attribute.KeyValue {
- return ProcessParentPIDKey.Int(val)
-}
-
-// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
-// semantic conventions. It represents the process identifier (PID).
-func ProcessPID(val int) attribute.KeyValue {
- return ProcessPIDKey.Int(val)
-}
-
-// ProcessRealUserID returns an attribute KeyValue conforming to the
-// "process.real_user.id" semantic conventions. It represents the real user ID
-// (RUID) of the process.
-func ProcessRealUserID(val int) attribute.KeyValue {
- return ProcessRealUserIDKey.Int(val)
-}
-
-// ProcessRealUserName returns an attribute KeyValue conforming to the
-// "process.real_user.name" semantic conventions. It represents the username of
-// the real user of the process.
-func ProcessRealUserName(val string) attribute.KeyValue {
- return ProcessRealUserNameKey.String(val)
-}
-
-// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
-// "process.runtime.description" semantic conventions. It represents an
-// additional description about the runtime of the process, for example a
-// specific vendor customization of the runtime environment.
-func ProcessRuntimeDescription(val string) attribute.KeyValue {
- return ProcessRuntimeDescriptionKey.String(val)
-}
-
-// ProcessRuntimeName returns an attribute KeyValue conforming to the
-// "process.runtime.name" semantic conventions. It represents the name of the
-// runtime of this process. For compiled native binaries, this SHOULD be the
-// name of the compiler.
-func ProcessRuntimeName(val string) attribute.KeyValue {
- return ProcessRuntimeNameKey.String(val)
-}
-
-// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
-// "process.runtime.version" semantic conventions. It represents the version of
-// the runtime of this process, as returned by the runtime without
-// modification.
-func ProcessRuntimeVersion(val string) attribute.KeyValue {
- return ProcessRuntimeVersionKey.String(val)
-}
-
-// ProcessSavedUserID returns an attribute KeyValue conforming to the
-// "process.saved_user.id" semantic conventions. It represents the saved user
-// ID (SUID) of the process.
-func ProcessSavedUserID(val int) attribute.KeyValue {
- return ProcessSavedUserIDKey.Int(val)
-}
-
-// ProcessSavedUserName returns an attribute KeyValue conforming to the
-// "process.saved_user.name" semantic conventions. It represents the username
-// of the saved user.
-func ProcessSavedUserName(val string) attribute.KeyValue {
- return ProcessSavedUserNameKey.String(val)
-}
-
-// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
-// "process.session_leader.pid" semantic conventions. It represents the PID of
-// the process's session leader. This is also the session ID (SID) of the
-// process.
-func ProcessSessionLeaderPID(val int) attribute.KeyValue {
- return ProcessSessionLeaderPIDKey.Int(val)
-}
-
-// ProcessUserID returns an attribute KeyValue conforming to the
-// "process.user.id" semantic conventions. It represents the effective user ID
-// (EUID) of the process.
-func ProcessUserID(val int) attribute.KeyValue {
- return ProcessUserIDKey.Int(val)
-}
-
-// ProcessUserName returns an attribute KeyValue conforming to the
-// "process.user.name" semantic conventions. It represents the username of the
-// effective user of the process.
-func ProcessUserName(val string) attribute.KeyValue {
- return ProcessUserNameKey.String(val)
-}
-
-// ProcessVpid returns an attribute KeyValue conforming to the
-// "process.vpid" semantic conventions. It represents the virtual process
-// identifier.
-func ProcessVpid(val int) attribute.KeyValue {
- return ProcessVpidKey.Int(val)
-}
-
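Most of the `process.*` helpers above are used as resource attributes describing the telemetry-producing process rather than as span attributes. A small sketch, assuming the SDK `resource` package and this vendored package imported as `semconv` (the import path version and the literal executable/runtime values are placeholders):

	import (
		"os"

		"go.opentelemetry.io/otel/sdk/resource"
		semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed vendored version
	)

	// processResource describes the running process for telemetry export.
	func processResource() *resource.Resource {
		return resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ProcessPID(os.Getpid()),
			semconv.ProcessExecutableName("otelcol"), // placeholder
			semconv.ProcessRuntimeName("go"),         // placeholder
		)
	}
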
-// Attributes for process CPU
-const (
- // ProcessCPUStateKey is the attribute Key conforming to the
- // "process.cpu.state" semantic conventions. It represents the CPU state of
- // the process.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- ProcessCPUStateKey = attribute.Key("process.cpu.state")
-)
-
-var (
- // system
- ProcessCPUStateSystem = ProcessCPUStateKey.String("system")
- // user
- ProcessCPUStateUser = ProcessCPUStateKey.String("user")
- // wait
- ProcessCPUStateWait = ProcessCPUStateKey.String("wait")
-)
-
-// Attributes for remote procedure calls.
-const (
- // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
- // "rpc.connect_rpc.error_code" semantic conventions. It represents the
- // [error codes](https://connect.build/docs/protocol/#error-codes) of the
- // Connect request. Error codes are always string values.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
-
- // RPCGRPCStatusCodeKey is the attribute Key conforming to the
- // "rpc.grpc.status_code" semantic conventions. It represents the [numeric
- // status
- // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of
- // the gRPC request.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
-
- // RPCJsonrpcErrorCodeKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_code" semantic conventions. It represents the
- // `error.code` property of response if it is an error response.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: -32700, 100
- RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
-
- // RPCJsonrpcErrorMessageKey is the attribute Key conforming to the
- // "rpc.jsonrpc.error_message" semantic conventions. It represents the
- // `error.message` property of response if it is an error response.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Parse error', 'User already exists'
- RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
-
- // RPCJsonrpcRequestIDKey is the attribute Key conforming to the
- // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-	// property of request or response. Since the protocol allows id to be
-	// int, string, `null` or missing (for notifications), the value is
-	// expected to be cast to string for simplicity. Use empty string in case
-	// of `null` value. Omit entirely if this is a notification.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '10', 'request-7', ''
- RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
-
- // RPCJsonrpcVersionKey is the attribute Key conforming to the
- // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
- // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
- // doesn't specify this, the value can be omitted.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2.0', '1.0'
- RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version")
-
- // RPCMessageCompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.compressed_size" semantic conventions. It represents the
- // compressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
-
- // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
-	// semantic conventions. It MUST be calculated as two different counters
-	// starting from `1`, one for sent messages and one for received messages.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Note: This way we guarantee that the values will be consistent between
- // different implementations.
- RPCMessageIDKey = attribute.Key("rpc.message.id")
-
- // RPCMessageTypeKey is the attribute Key conforming to the
-	// "rpc.message.type" semantic conventions. It represents whether this is
-	// a received or sent message.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageTypeKey = attribute.Key("rpc.message.type")
-
- // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
- // "rpc.message.uncompressed_size" semantic conventions. It represents the
- // uncompressed size of the message in bytes.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
-
- // RPCMethodKey is the attribute Key conforming to the "rpc.method"
- // semantic conventions. It represents the name of the (logical) method
- // being called, must be equal to the $method part in the span name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'exampleMethod'
- // Note: This is the logical name of the method from the RPC interface
- // perspective, which can be different from the name of any implementing
- // method/function. The `code.function` attribute may be used to store the
- // latter (e.g., method actually executing the call on the server side, RPC
- // client stub method on the client side).
- RPCMethodKey = attribute.Key("rpc.method")
-
- // RPCServiceKey is the attribute Key conforming to the "rpc.service"
- // semantic conventions. It represents the full (logical) name of the
- // service being called, including its package name, if applicable.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'myservice.EchoService'
- // Note: This is the logical name of the service from the RPC interface
- // perspective, which can be different from the name of any implementing
- // class. The `code.namespace` attribute may be used to store the latter
- // (despite the attribute name, it may include a class name; e.g., class
- // with method actually executing the call on the server side, RPC client
- // stub class on the client side).
- RPCServiceKey = attribute.Key("rpc.service")
-
- // RPCSystemKey is the attribute Key conforming to the "rpc.system"
- // semantic conventions. It represents a string identifying the remoting
- // system. See below for a list of well-known identifiers.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- RPCSystemKey = attribute.Key("rpc.system")
-)
-
-var (
- // cancelled
- RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
- // unknown
- RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
- // invalid_argument
- RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
- // deadline_exceeded
- RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
- // not_found
- RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
- // already_exists
- RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
- // permission_denied
- RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
- // resource_exhausted
- RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
- // failed_precondition
- RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
- // aborted
- RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
- // out_of_range
- RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
- // unimplemented
- RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
- // internal
- RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
- // unavailable
- RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
- // data_loss
- RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
- // unauthenticated
- RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
-)
-
-var (
- // OK
- RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
- // CANCELLED
- RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
- // UNKNOWN
- RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
- // INVALID_ARGUMENT
- RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
- // DEADLINE_EXCEEDED
- RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
- // NOT_FOUND
- RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
- // ALREADY_EXISTS
- RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
- // PERMISSION_DENIED
- RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
- // RESOURCE_EXHAUSTED
- RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
- // FAILED_PRECONDITION
- RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
- // ABORTED
- RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
- // OUT_OF_RANGE
- RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
- // UNIMPLEMENTED
- RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
- // INTERNAL
- RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
- // UNAVAILABLE
- RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
- // DATA_LOSS
- RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
- // UNAUTHENTICATED
- RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
-)
-
-var (
- // sent
- RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
- // received
- RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
-)
-
-var (
- // gRPC
- RPCSystemGRPC = RPCSystemKey.String("grpc")
- // Java RMI
- RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
- // .NET WCF
- RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
- // Apache Dubbo
- RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
- // Connect RPC
- RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
-)
-
-// RPCJsonrpcErrorCode returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_code" semantic conventions. It represents the
-// `error.code` property of response if it is an error response.
-func RPCJsonrpcErrorCode(val int) attribute.KeyValue {
- return RPCJsonrpcErrorCodeKey.Int(val)
-}
-
-// RPCJsonrpcErrorMessage returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.error_message" semantic conventions. It represents the
-// `error.message` property of response if it is an error response.
-func RPCJsonrpcErrorMessage(val string) attribute.KeyValue {
- return RPCJsonrpcErrorMessageKey.String(val)
-}
-
-// RPCJsonrpcRequestID returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
-// property of request or response. Since the protocol allows id to be int,
-// string, `null` or missing (for notifications), the value is expected to be
-// cast to string for simplicity. Use empty string in case of `null` value.
-// Omit entirely if this is a notification.
-func RPCJsonrpcRequestID(val string) attribute.KeyValue {
- return RPCJsonrpcRequestIDKey.String(val)
-}
-
-// RPCJsonrpcVersion returns an attribute KeyValue conforming to the
-// "rpc.jsonrpc.version" semantic conventions. It represents the protocol
-// version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
-// doesn't specify this, the value can be omitted.
-func RPCJsonrpcVersion(val string) attribute.KeyValue {
- return RPCJsonrpcVersionKey.String(val)
-}
-
-// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
-// "rpc.message.compressed_size" semantic conventions. It represents the
-// compressed size of the message in bytes.
-func RPCMessageCompressedSize(val int) attribute.KeyValue {
- return RPCMessageCompressedSizeKey.Int(val)
-}
-
-// RPCMessageID returns an attribute KeyValue conforming to the
-// "rpc.message.id" semantic conventions. It MUST be calculated as two
-// different counters starting from `1`, one for sent messages and one for
-// received messages.
-func RPCMessageID(val int) attribute.KeyValue {
- return RPCMessageIDKey.Int(val)
-}
-
-// RPCMessageUncompressedSize returns an attribute KeyValue conforming to
-// the "rpc.message.uncompressed_size" semantic conventions. It represents the
-// uncompressed size of the message in bytes.
-func RPCMessageUncompressedSize(val int) attribute.KeyValue {
- return RPCMessageUncompressedSizeKey.Int(val)
-}
-
-// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
-// semantic conventions. It represents the name of the (logical) method being
-// called, must be equal to the $method part in the span name.
-func RPCMethod(val string) attribute.KeyValue {
- return RPCMethodKey.String(val)
-}
-
-// RPCService returns an attribute KeyValue conforming to the "rpc.service"
-// semantic conventions. It represents the full (logical) name of the service
-// being called, including its package name, if applicable.
-func RPCService(val string) attribute.KeyValue {
- return RPCServiceKey.String(val)
-}
-
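Taken together, the RPC attributes above usually appear on a single client or server span per call. A hypothetical client-side sketch using the standard `go.opentelemetry.io/otel` tracing API, with the service and method names taken from the examples in the doc comments and the import path version assumed:

	import (
		"context"

		"go.opentelemetry.io/otel"
		semconv "go.opentelemetry.io/otel/semconv/v1.25.0" // assumed vendored version
	)

	// traceEcho records a gRPC client call as a span with RPC attributes.
	func traceEcho(ctx context.Context) {
		_, span := otel.Tracer("example").Start(ctx, "myservice.EchoService/exampleMethod")
		defer span.End()
		span.SetAttributes(
			semconv.RPCSystemGRPC,
			semconv.RPCService("myservice.EchoService"),
			semconv.RPCMethod("exampleMethod"),
			semconv.RPCGRPCStatusCodeOk,
		)
	}
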
-// These attributes may be used to describe the server in a connection-based
-// network interaction where there is one side that initiates the connection
-// (the client is the side that initiates the connection). This covers all TCP
-// network interactions since TCP is connection-based and one side initiates
-// the connection (an exception is made for peer-to-peer communication over TCP
-// where the "user-facing" surface of the protocol / API doesn't expose a clear
-// notion of client and server). This also covers UDP network interactions
-// where one side initiates the interaction, e.g. QUIC (HTTP/3) and DNS.
-const (
- // ServerAddressKey is the attribute Key conforming to the "server.address"
- // semantic conventions. It represents the server domain name if available
- // without reverse DNS lookup; otherwise, IP address or Unix domain socket
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.address` SHOULD represent the server address
- // behind any intermediaries, for example proxies, if it's available.
- ServerAddressKey = attribute.Key("server.address")
-
- // ServerPortKey is the attribute Key conforming to the "server.port"
- // semantic conventions. It represents the server port number.
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 80, 8080, 443
- // Note: When observed from the client side, and when communicating through
- // an intermediary, `server.port` SHOULD represent the server port behind
- // any intermediaries, for example proxies, if it's available.
- ServerPortKey = attribute.Key("server.port")
-)
-
-// ServerAddress returns an attribute KeyValue conforming to the
-// "server.address" semantic conventions. It represents the server domain name
-// if available without reverse DNS lookup; otherwise, IP address or Unix
-// domain socket name.
-func ServerAddress(val string) attribute.KeyValue {
- return ServerAddressKey.String(val)
-}
-
-// ServerPort returns an attribute KeyValue conforming to the "server.port"
-// semantic conventions. It represents the server port number.
-func ServerPort(val int) attribute.KeyValue {
- return ServerPortKey.Int(val)
-}
-
-// A service instance.
-const (
- // ServiceInstanceIDKey is the attribute Key conforming to the
- // "service.instance.id" semantic conventions. It represents the string ID
- // of the service instance.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '627cc493-f310-47de-96bd-71410b7dec09'
- // Note: MUST be unique for each instance of the same
-	// `service.namespace,service.name` pair (in other words, the
-	// `service.namespace,service.name,service.instance.id` triplet MUST be
- // globally unique). The ID helps to
- // distinguish instances of the same service that exist at the same time
- // (e.g. instances of a horizontally scaled
- // service).
- //
- // Implementations, such as SDKs, are recommended to generate a random
- // Version 1 or Version 4 [RFC
- // 4122](https://www.ietf.org/rfc/rfc4122.txt) UUID, but are free to use an
- // inherent unique ID as the source of
- // this value if stability is desirable. In that case, the ID SHOULD be
- // used as source of a UUID Version 5 and
- // SHOULD use the following UUID as the namespace:
- // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
- //
- // UUIDs are typically recommended, as only an opaque value for the
- // purposes of identifying a service instance is
- // needed. Similar to what can be seen in the man page for the
- // [`/etc/machine-id`](https://www.freedesktop.org/software/systemd/man/machine-id.html)
- // file, the underlying
- // data, such as pod name and namespace should be treated as confidential,
- // being the user's choice to expose it
- // or not via another resource attribute.
- //
- // For applications running behind an application server (like unicorn), we
- // do not recommend using one identifier
- // for all processes participating in the application. Instead, it's
- // recommended that each division (e.g. a worker
- // thread in unicorn) have its own instance.id.
- //
- // It's not recommended for a Collector to set `service.instance.id` if it
- // can't unambiguously determine the
- // service instance that is generating that telemetry. For instance,
- // creating an UUID based on `pod.name` will
- // likely be wrong, as the Collector might not know from which container
- // within that pod the telemetry originated.
- // However, Collectors can set the `service.instance.id` if they can
- // unambiguously determine the service instance
- // for that telemetry. This is typically the case for scraping receivers,
- // as they know the target address and
- // port.
- ServiceInstanceIDKey = attribute.Key("service.instance.id")
-
- // ServiceNameKey is the attribute Key conforming to the "service.name"
- // semantic conventions. It represents the logical name of the service.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'shoppingcart'
- // Note: MUST be the same for all instances of horizontally scaled
- // services. If the value was not specified, SDKs MUST fallback to
- // `unknown_service:` concatenated with
- // [`process.executable.name`](process.md), e.g. `unknown_service:bash`. If
- // `process.executable.name` is not available, the value MUST be set to
- // `unknown_service`.
- ServiceNameKey = attribute.Key("service.name")
-
- // ServiceNamespaceKey is the attribute Key conforming to the
- // "service.namespace" semantic conventions. It represents a namespace for
- // `service.name`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Shop'
- // Note: A string value having a meaning that helps to distinguish a group
- // of services, for example the team name that owns a group of services.
- // `service.name` is expected to be unique within the same namespace. If
- // `service.namespace` is not specified in the Resource then `service.name`
- // is expected to be unique for all services that have no explicit
- // namespace defined (so the empty/unspecified namespace is simply one more
- // valid namespace). Zero-length namespace string is assumed equal to
- // unspecified namespace.
- ServiceNamespaceKey = attribute.Key("service.namespace")
-
- // ServiceVersionKey is the attribute Key conforming to the
- // "service.version" semantic conventions. It represents the version string
- // of the service API or implementation. The format is not defined by these
- // conventions.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '2.0.0', 'a01dbef8a'
- ServiceVersionKey = attribute.Key("service.version")
-)
-
-// ServiceInstanceID returns an attribute KeyValue conforming to the
-// "service.instance.id" semantic conventions. It represents the string ID of
-// the service instance.
-func ServiceInstanceID(val string) attribute.KeyValue {
- return ServiceInstanceIDKey.String(val)
-}
-
-// ServiceName returns an attribute KeyValue conforming to the
-// "service.name" semantic conventions. It represents the logical name of the
-// service.
-func ServiceName(val string) attribute.KeyValue {
- return ServiceNameKey.String(val)
-}
-
-// ServiceNamespace returns an attribute KeyValue conforming to the
-// "service.namespace" semantic conventions. It represents a namespace for
-// `service.name`.
-func ServiceNamespace(val string) attribute.KeyValue {
- return ServiceNamespaceKey.String(val)
-}
-
-// ServiceVersion returns an attribute KeyValue conforming to the
-// "service.version" semantic conventions. It represents the version string of
-// the service API or implementation. The format is not defined by these
-// conventions.
-func ServiceVersion(val string) attribute.KeyValue {
- return ServiceVersionKey.String(val)
-}
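
For context, the `service.*` helpers are most often used when constructing an SDK resource that identifies the emitting process. A minimal sketch, assuming the `go.opentelemetry.io/otel/sdk/resource` package and the v1.24.0 semconv import path; the service name, namespace, and version literals are placeholders:

```go
package example

import (
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
)

// newResource merges the SDK defaults with explicit service identity
// attributes. The literal values are examples only.
func newResource() (*resource.Resource, error) {
	return resource.Merge(
		resource.Default(),
		resource.NewWithAttributes(
			semconv.SchemaURL,
			semconv.ServiceName("flowlogs-pipeline"),
			semconv.ServiceNamespace("netobserv"),
			semconv.ServiceVersion("0.0.1"),
		),
	)
}
```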
-
-// Session is defined as the period of time encompassing all activities
-// performed by the application and the actions executed by the end user.
-// Consequently, a Session is represented as a collection of Logs, Events, and
-// Spans emitted by the Client Application throughout the Session's duration.
-// Each Session is assigned a unique identifier, which is included as an
-// attribute in the Logs, Events, and Spans generated during the Session's
-// lifecycle.
-// When a session reaches end of life, typically due to user inactivity or
-// session timeout, a new session identifier will be assigned. The previous
-// session identifier may be provided by the instrumentation so that telemetry
-// backends can link the two sessions.
-const (
- // SessionIDKey is the attribute Key conforming to the "session.id"
- // semantic conventions. It represents a unique id to identify a session.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionIDKey = attribute.Key("session.id")
-
- // SessionPreviousIDKey is the attribute Key conforming to the
- // "session.previous_id" semantic conventions. It represents the previous
- // `session.id` for this user, when known.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '00112233-4455-6677-8899-aabbccddeeff'
- SessionPreviousIDKey = attribute.Key("session.previous_id")
-)
-
-// SessionID returns an attribute KeyValue conforming to the "session.id"
-// semantic conventions. It represents a unique id to identify a session.
-func SessionID(val string) attribute.KeyValue {
- return SessionIDKey.String(val)
-}
-
-// SessionPreviousID returns an attribute KeyValue conforming to the
-// "session.previous_id" semantic conventions. It represents the previous
-// `session.id` for this user, when known.
-func SessionPreviousID(val string) attribute.KeyValue {
- return SessionPreviousIDKey.String(val)
-}
-
-// SignalR attributes
-const (
- // SignalrConnectionStatusKey is the attribute Key conforming to the
- // "signalr.connection.status" semantic conventions. It represents the
- // SignalR HTTP connection closure status.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'app_shutdown', 'timeout'
- SignalrConnectionStatusKey = attribute.Key("signalr.connection.status")
-
- // SignalrTransportKey is the attribute Key conforming to the
- // "signalr.transport" semantic conventions. It represents the [SignalR
- // transport
- // type](https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'web_sockets', 'long_polling'
- SignalrTransportKey = attribute.Key("signalr.transport")
-)
-
-var (
- // The connection was closed normally
- SignalrConnectionStatusNormalClosure = SignalrConnectionStatusKey.String("normal_closure")
- // The connection was closed due to a timeout
- SignalrConnectionStatusTimeout = SignalrConnectionStatusKey.String("timeout")
- // The connection was closed because the app is shutting down
- SignalrConnectionStatusAppShutdown = SignalrConnectionStatusKey.String("app_shutdown")
-)
-
-var (
- // ServerSentEvents protocol
- SignalrTransportServerSentEvents = SignalrTransportKey.String("server_sent_events")
- // LongPolling protocol
- SignalrTransportLongPolling = SignalrTransportKey.String("long_polling")
- // WebSockets protocol
- SignalrTransportWebSockets = SignalrTransportKey.String("web_sockets")
-)
-
-// These attributes may be used to describe the sender of a network
-// exchange/packet. These should be used when there is no client/server
-// relationship between the two sides, or when that relationship is unknown.
-// This covers low-level network interactions (e.g. packet tracing) where you
-// don't know if there was a connection or which side initiated it. This also
-// covers unidirectional UDP flows and peer-to-peer communication where the
-// "user-facing" surface of the protocol / API doesn't expose a clear notion of
-// client and server.
-const (
- // SourceAddressKey is the attribute Key conforming to the "source.address"
- // semantic conventions. It represents the source address - domain name if
- // available without reverse DNS lookup; otherwise, IP address or Unix
- // domain socket name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'source.example.com', '10.1.2.80', '/tmp/my.sock'
- // Note: When observed from the destination side, and when communicating
- // through an intermediary, `source.address` SHOULD represent the source
- // address behind any intermediaries, for example proxies, if it's
- // available.
- SourceAddressKey = attribute.Key("source.address")
-
- // SourcePortKey is the attribute Key conforming to the "source.port"
- // semantic conventions. It represents the source port number
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 3389, 2888
- SourcePortKey = attribute.Key("source.port")
-)
-
-// SourceAddress returns an attribute KeyValue conforming to the
-// "source.address" semantic conventions. It represents the source address -
-// domain name if available without reverse DNS lookup; otherwise, IP address
-// or Unix domain socket name.
-func SourceAddress(val string) attribute.KeyValue {
- return SourceAddressKey.String(val)
-}
-
-// SourcePort returns an attribute KeyValue conforming to the "source.port"
-// semantic conventions. It represents the source port number
-func SourcePort(val int) attribute.KeyValue {
- return SourcePortKey.Int(val)
-}
-
-// Describes System attributes
-const (
- // SystemDeviceKey is the attribute Key conforming to the "system.device"
- // semantic conventions. It represents the device identifier
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '(identifier)'
- SystemDeviceKey = attribute.Key("system.device")
-)
-
-// SystemDevice returns an attribute KeyValue conforming to the
-// "system.device" semantic conventions. It represents the device identifier
-func SystemDevice(val string) attribute.KeyValue {
- return SystemDeviceKey.String(val)
-}
-
-// Describes System CPU attributes
-const (
- // SystemCPULogicalNumberKey is the attribute Key conforming to the
- // "system.cpu.logical_number" semantic conventions. It represents the
- // logical CPU number [0..n-1]
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 1
- SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
-
- // SystemCPUStateKey is the attribute Key conforming to the
- // "system.cpu.state" semantic conventions. It represents the state of the
- // CPU
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'idle', 'interrupt'
- SystemCPUStateKey = attribute.Key("system.cpu.state")
-)
-
-var (
- // user
- SystemCPUStateUser = SystemCPUStateKey.String("user")
- // system
- SystemCPUStateSystem = SystemCPUStateKey.String("system")
- // nice
- SystemCPUStateNice = SystemCPUStateKey.String("nice")
- // idle
- SystemCPUStateIdle = SystemCPUStateKey.String("idle")
- // iowait
- SystemCPUStateIowait = SystemCPUStateKey.String("iowait")
- // interrupt
- SystemCPUStateInterrupt = SystemCPUStateKey.String("interrupt")
- // steal
- SystemCPUStateSteal = SystemCPUStateKey.String("steal")
-)
-
-// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
-// "system.cpu.logical_number" semantic conventions. It represents the logical
-// CPU number [0..n-1]
-func SystemCPULogicalNumber(val int) attribute.KeyValue {
- return SystemCPULogicalNumberKey.Int(val)
-}
-
-// Describes System Memory attributes
-const (
- // SystemMemoryStateKey is the attribute Key conforming to the
- // "system.memory.state" semantic conventions. It represents the memory
- // state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free', 'cached'
- SystemMemoryStateKey = attribute.Key("system.memory.state")
-)
-
-var (
- // used
- SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
- // free
- SystemMemoryStateFree = SystemMemoryStateKey.String("free")
- // shared
- SystemMemoryStateShared = SystemMemoryStateKey.String("shared")
- // buffers
- SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
- // cached
- SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
-)
-
-// Describes System Memory Paging attributes
-const (
- // SystemPagingDirectionKey is the attribute Key conforming to the
- // "system.paging.direction" semantic conventions. It represents the paging
- // access direction
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'in'
- SystemPagingDirectionKey = attribute.Key("system.paging.direction")
-
- // SystemPagingStateKey is the attribute Key conforming to the
- // "system.paging.state" semantic conventions. It represents the memory
- // paging state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'free'
- SystemPagingStateKey = attribute.Key("system.paging.state")
-
- // SystemPagingTypeKey is the attribute Key conforming to the
- // "system.paging.type" semantic conventions. It represents the memory
- // paging type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'minor'
- SystemPagingTypeKey = attribute.Key("system.paging.type")
-)
-
-var (
- // in
- SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
- // out
- SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
-)
-
-var (
- // used
- SystemPagingStateUsed = SystemPagingStateKey.String("used")
- // free
- SystemPagingStateFree = SystemPagingStateKey.String("free")
-)
-
-var (
- // major
- SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
- // minor
- SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
-)
-
-// Describes Filesystem attributes
-const (
- // SystemFilesystemModeKey is the attribute Key conforming to the
- // "system.filesystem.mode" semantic conventions. It represents the
- // filesystem mode
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'rw, ro'
- SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
-
- // SystemFilesystemMountpointKey is the attribute Key conforming to the
- // "system.filesystem.mountpoint" semantic conventions. It represents the
- // filesystem mount path
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/mnt/data'
- SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
-
- // SystemFilesystemStateKey is the attribute Key conforming to the
- // "system.filesystem.state" semantic conventions. It represents the
- // filesystem state
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'used'
- SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
-
- // SystemFilesystemTypeKey is the attribute Key conforming to the
- // "system.filesystem.type" semantic conventions. It represents the
- // filesystem type
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'ext4'
- SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
-)
-
-var (
- // used
- SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
- // free
- SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
- // reserved
- SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
-)
-
-var (
- // fat32
- SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
- // exfat
- SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
- // ntfs
- SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
- // refs
- SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
- // hfsplus
- SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
- // ext4
- SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
-)
-
-// SystemFilesystemMode returns an attribute KeyValue conforming to the
-// "system.filesystem.mode" semantic conventions. It represents the filesystem
-// mode
-func SystemFilesystemMode(val string) attribute.KeyValue {
- return SystemFilesystemModeKey.String(val)
-}
-
-// SystemFilesystemMountpoint returns an attribute KeyValue conforming to
-// the "system.filesystem.mountpoint" semantic conventions. It represents the
-// filesystem mount path
-func SystemFilesystemMountpoint(val string) attribute.KeyValue {
- return SystemFilesystemMountpointKey.String(val)
-}
-
-// Describes Network attributes
-const (
- // SystemNetworkStateKey is the attribute Key conforming to the
- // "system.network.state" semantic conventions. It represents the network
- // connection state. A stateless protocol MUST NOT set this attribute.
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'close_wait'
- SystemNetworkStateKey = attribute.Key("system.network.state")
-)
-
-var (
- // close
- SystemNetworkStateClose = SystemNetworkStateKey.String("close")
- // close_wait
- SystemNetworkStateCloseWait = SystemNetworkStateKey.String("close_wait")
- // closing
- SystemNetworkStateClosing = SystemNetworkStateKey.String("closing")
- // delete
- SystemNetworkStateDelete = SystemNetworkStateKey.String("delete")
- // established
- SystemNetworkStateEstablished = SystemNetworkStateKey.String("established")
- // fin_wait_1
- SystemNetworkStateFinWait1 = SystemNetworkStateKey.String("fin_wait_1")
- // fin_wait_2
- SystemNetworkStateFinWait2 = SystemNetworkStateKey.String("fin_wait_2")
- // last_ack
- SystemNetworkStateLastAck = SystemNetworkStateKey.String("last_ack")
- // listen
- SystemNetworkStateListen = SystemNetworkStateKey.String("listen")
- // syn_recv
- SystemNetworkStateSynRecv = SystemNetworkStateKey.String("syn_recv")
- // syn_sent
- SystemNetworkStateSynSent = SystemNetworkStateKey.String("syn_sent")
- // time_wait
- SystemNetworkStateTimeWait = SystemNetworkStateKey.String("time_wait")
-)
-
-// Describes System Process attributes
-const (
- // SystemProcessStatusKey is the attribute Key conforming to the
- // "system.process.status" semantic conventions. It represents the process
- // state, e.g., [Linux Process State
- // Codes](https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'running'
- SystemProcessStatusKey = attribute.Key("system.process.status")
-)
-
-var (
- // running
- SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
- // sleeping
- SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
- // stopped
- SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
- // defunct
- SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
-)
-
-// Attributes for telemetry SDK.
-const (
- // TelemetrySDKLanguageKey is the attribute Key conforming to the
- // "telemetry.sdk.language" semantic conventions. It represents the
- // language of the telemetry SDK.
- //
- // Type: Enum
- // RequirementLevel: Required
- // Stability: stable
- TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
-
- // TelemetrySDKNameKey is the attribute Key conforming to the
- // "telemetry.sdk.name" semantic conventions. It represents the name of the
- // telemetry SDK as defined above.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: 'opentelemetry'
- // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute
- // to `opentelemetry`.
- // If another SDK, like a fork or a vendor-provided implementation, is
- // used, this SDK MUST set the
- // `telemetry.sdk.name` attribute to the fully-qualified class or module
- // name of this SDK's main entry point
- // or another suitable identifier depending on the language.
- // The identifier `opentelemetry` is reserved and MUST NOT be used in this
- // case.
- // All custom identifiers SHOULD be stable across different versions of an
- // implementation.
- TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
-
- // TelemetrySDKVersionKey is the attribute Key conforming to the
- // "telemetry.sdk.version" semantic conventions. It represents the version
- // string of the telemetry SDK.
- //
- // Type: string
- // RequirementLevel: Required
- // Stability: stable
- // Examples: '1.2.3'
- TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
-
- // TelemetryDistroNameKey is the attribute Key conforming to the
- // "telemetry.distro.name" semantic conventions. It represents the name of
- // the auto instrumentation agent or distribution, if used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'parts-unlimited-java'
- // Note: Official auto instrumentation agents and distributions SHOULD set
- // the `telemetry.distro.name` attribute to
- // a string starting with `opentelemetry-`, e.g.
- // `opentelemetry-java-instrumentation`.
- TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
-
- // TelemetryDistroVersionKey is the attribute Key conforming to the
- // "telemetry.distro.version" semantic conventions. It represents the
- // version string of the auto instrumentation agent or distribution, if
- // used.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2.3'
- TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
-)
-
-var (
- // cpp
- TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
- // dotnet
- TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
- // erlang
- TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
- // go
- TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
- // java
- TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
- // nodejs
- TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
- // php
- TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
- // python
- TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
- // ruby
- TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
- // rust
- TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
- // swift
- TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
- // webjs
- TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs")
-)
-
-// TelemetrySDKName returns an attribute KeyValue conforming to the
-// "telemetry.sdk.name" semantic conventions. It represents the name of the
-// telemetry SDK as defined above.
-func TelemetrySDKName(val string) attribute.KeyValue {
- return TelemetrySDKNameKey.String(val)
-}
-
-// TelemetrySDKVersion returns an attribute KeyValue conforming to the
-// "telemetry.sdk.version" semantic conventions. It represents the version
-// string of the telemetry SDK.
-func TelemetrySDKVersion(val string) attribute.KeyValue {
- return TelemetrySDKVersionKey.String(val)
-}
-
-// TelemetryDistroName returns an attribute KeyValue conforming to the
-// "telemetry.distro.name" semantic conventions. It represents the name of the
-// auto instrumentation agent or distribution, if used.
-func TelemetryDistroName(val string) attribute.KeyValue {
- return TelemetryDistroNameKey.String(val)
-}
-
-// TelemetryDistroVersion returns an attribute KeyValue conforming to the
-// "telemetry.distro.version" semantic conventions. It represents the version
-// string of the auto instrumentation agent or distribution, if used.
-func TelemetryDistroVersion(val string) attribute.KeyValue {
- return TelemetryDistroVersionKey.String(val)
-}
-
-// These attributes may be used for any operation to store information about a
-// thread that started a span.
-const (
- // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
- // conventions. It represents the current "managed" thread ID (as opposed
- // to OS thread ID).
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 42
- ThreadIDKey = attribute.Key("thread.id")
-
- // ThreadNameKey is the attribute Key conforming to the "thread.name"
- // semantic conventions. It represents the current thread name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'main'
- ThreadNameKey = attribute.Key("thread.name")
-)
-
-// ThreadID returns an attribute KeyValue conforming to the "thread.id"
-// semantic conventions. It represents the current "managed" thread ID (as
-// opposed to OS thread ID).
-func ThreadID(val int) attribute.KeyValue {
- return ThreadIDKey.Int(val)
-}
-
-// ThreadName returns an attribute KeyValue conforming to the "thread.name"
-// semantic conventions. It represents the current thread name.
-func ThreadName(val string) attribute.KeyValue {
- return ThreadNameKey.String(val)
-}
-
-// Semantic convention attributes in the TLS namespace.
-const (
- // TLSCipherKey is the attribute Key conforming to the "tls.cipher"
- // semantic conventions. It represents the string indicating the
- // [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5)
- // used during the current connection.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'TLS_RSA_WITH_3DES_EDE_CBC_SHA',
- // 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'
- // Note: The values allowed for `tls.cipher` MUST be one of the
- // `Descriptions` of the [registered TLS Cipher
- // Suites](https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4).
- TLSCipherKey = attribute.Key("tls.cipher")
-
- // TLSClientCertificateKey is the attribute Key conforming to the
- // "tls.client.certificate" semantic conventions. It represents the
- // PEM-encoded stand-alone certificate offered by the client. This is
- // usually mutually-exclusive of `client.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSClientCertificateKey = attribute.Key("tls.client.certificate")
-
- // TLSClientCertificateChainKey is the attribute Key conforming to the
- // "tls.client.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the client. This is usually mutually-exclusive of
- // `client.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
-
- // TLSClientHashMd5Key is the attribute Key conforming to the
- // "tls.client.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
-
- // TLSClientHashSha1Key is the attribute Key conforming to the
- // "tls.client.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
-
- // TLSClientHashSha256Key is the attribute Key conforming to the
- // "tls.client.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the client. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
-
- // TLSClientIssuerKey is the attribute Key conforming to the
- // "tls.client.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSClientIssuerKey = attribute.Key("tls.client.issuer")
-
- // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
- // semantic conventions. It represents a hash that identifies clients based
- // on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSClientJa3Key = attribute.Key("tls.client.ja3")
-
- // TLSClientNotAfterKey is the attribute Key conforming to the
- // "tls.client.not_after" semantic conventions. It represents the date/time
- // indicating when the client certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
-
- // TLSClientNotBeforeKey is the attribute Key conforming to the
- // "tls.client.not_before" semantic conventions. It represents the
- // date/time indicating when the client certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
-
- // TLSClientServerNameKey is the attribute Key conforming to the
- // "tls.client.server_name" semantic conventions. It represents the server
- // name indication (SNI), which tells the server the hostname the client is
- // attempting to connect to.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'opentelemetry.io'
- TLSClientServerNameKey = attribute.Key("tls.client.server_name")
-
- // TLSClientSubjectKey is the attribute Key conforming to the
- // "tls.client.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myclient, OU=Documentation Team, DC=example, DC=com'
- TLSClientSubjectKey = attribute.Key("tls.client.subject")
-
- // TLSClientSupportedCiphersKey is the attribute Key conforming to the
- // "tls.client.supported_ciphers" semantic conventions. It represents the
- // array of ciphers offered by the client during the client hello.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
- // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", "..."'
- TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
-
- // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
- // conventions. It represents the string indicating the curve used for the
- // given cipher, when applicable
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'secp256r1'
- TLSCurveKey = attribute.Key("tls.curve")
-
- // TLSEstablishedKey is the attribute Key conforming to the
- // "tls.established" semantic conventions. It represents the boolean flag
- // indicating if the TLS negotiation was successful and transitioned to an
- // encrypted tunnel.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSEstablishedKey = attribute.Key("tls.established")
-
- // TLSNextProtocolKey is the attribute Key conforming to the
- // "tls.next_protocol" semantic conventions. It represents the string
- // indicating the protocol being tunneled. Per the values in the [IANA
- // registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
- // this string should be lower case.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'http/1.1'
- TLSNextProtocolKey = attribute.Key("tls.next_protocol")
-
- // TLSProtocolNameKey is the attribute Key conforming to the
- // "tls.protocol.name" semantic conventions. It represents the normalized
- // lowercase protocol name parsed from original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: Enum
- // RequirementLevel: Optional
- // Stability: experimental
- TLSProtocolNameKey = attribute.Key("tls.protocol.name")
-
- // TLSProtocolVersionKey is the attribute Key conforming to the
- // "tls.protocol.version" semantic conventions. It represents the numeric
- // part of the version parsed from the original string of the negotiated
- // [SSL/TLS protocol
- // version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1.2', '3'
- TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
-
- // TLSResumedKey is the attribute Key conforming to the "tls.resumed"
- // semantic conventions. It represents the boolean flag indicating if this
- // TLS connection was resumed from an existing TLS negotiation.
- //
- // Type: boolean
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: True
- TLSResumedKey = attribute.Key("tls.resumed")
-
- // TLSServerCertificateKey is the attribute Key conforming to the
- // "tls.server.certificate" semantic conventions. It represents the
- // PEM-encoded stand-alone certificate offered by the server. This is
- // usually mutually-exclusive of `server.certificate_chain` since this
- // value also exists in that list.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...'
- TLSServerCertificateKey = attribute.Key("tls.server.certificate")
-
- // TLSServerCertificateChainKey is the attribute Key conforming to the
- // "tls.server.certificate_chain" semantic conventions. It represents the
- // array of PEM-encoded certificates that make up the certificate chain
- // offered by the server. This is usually mutually-exclusive of
- // `server.certificate` since that value should be the first certificate in
- // the chain.
- //
- // Type: string[]
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'MII...', 'MI...'
- TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
-
- // TLSServerHashMd5Key is the attribute Key conforming to the
- // "tls.server.hash.md5" semantic conventions. It represents the
- // certificate fingerprint using the MD5 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC'
- TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
-
- // TLSServerHashSha1Key is the attribute Key conforming to the
- // "tls.server.hash.sha1" semantic conventions. It represents the
- // certificate fingerprint using the SHA1 digest of DER-encoded version of
- // certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '9E393D93138888D288266C2D915214D1D1CCEB2A'
- TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
-
- // TLSServerHashSha256Key is the attribute Key conforming to the
- // "tls.server.hash.sha256" semantic conventions. It represents the
- // certificate fingerprint using the SHA256 digest of DER-encoded version
- // of certificate offered by the server. For consistency with other hash
- // values, this value should be formatted as an uppercase hash.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples:
- // '0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0'
- TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
-
- // TLSServerIssuerKey is the attribute Key conforming to the
- // "tls.server.issuer" semantic conventions. It represents the
- // distinguished name of
- // [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6)
- // of the issuer of the x.509 certificate presented by the server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=Example Root CA, OU=Infrastructure Team, DC=example,
- // DC=com'
- TLSServerIssuerKey = attribute.Key("tls.server.issuer")
-
- // TLSServerJa3sKey is the attribute Key conforming to the
- // "tls.server.ja3s" semantic conventions. It represents a hash that
- // identifies servers based on how they perform an SSL/TLS handshake.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'd4e5b18d6b55c71272893221c96ba240'
- TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
-
- // TLSServerNotAfterKey is the attribute Key conforming to the
- // "tls.server.not_after" semantic conventions. It represents the date/time
- // indicating when the server certificate is no longer considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '2021-01-01T00:00:00.000Z'
- TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
-
- // TLSServerNotBeforeKey is the attribute Key conforming to the
- // "tls.server.not_before" semantic conventions. It represents the
- // date/time indicating when the server certificate is first considered valid.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '1970-01-01T00:00:00.000Z'
- TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
-
- // TLSServerSubjectKey is the attribute Key conforming to the
- // "tls.server.subject" semantic conventions. It represents the
- // distinguished name of subject of the x.509 certificate presented by the
- // server.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'CN=myserver, OU=Documentation Team, DC=example, DC=com'
- TLSServerSubjectKey = attribute.Key("tls.server.subject")
-)
-
-var (
- // ssl
- TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
- // tls
- TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
-)
-
-// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
-// semantic conventions. It represents the string indicating the
-// [cipher](https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5) used
-// during the current connection.
-func TLSCipher(val string) attribute.KeyValue {
- return TLSCipherKey.String(val)
-}
-
-// TLSClientCertificate returns an attribute KeyValue conforming to the
-// "tls.client.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the client. This is usually
-// mutually-exclusive of `client.certificate_chain` since this value also
-// exists in that list.
-func TLSClientCertificate(val string) attribute.KeyValue {
- return TLSClientCertificateKey.String(val)
-}
-
-// TLSClientCertificateChain returns an attribute KeyValue conforming to the
-// "tls.client.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the client. This is usually mutually-exclusive of `client.certificate` since
-// that value should be the first certificate in the chain.
-func TLSClientCertificateChain(val ...string) attribute.KeyValue {
- return TLSClientCertificateChainKey.StringSlice(val)
-}
-
-// TLSClientHashMd5 returns an attribute KeyValue conforming to the
-// "tls.client.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashMd5(val string) attribute.KeyValue {
- return TLSClientHashMd5Key.String(val)
-}
-
-// TLSClientHashSha1 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha1(val string) attribute.KeyValue {
- return TLSClientHashSha1Key.String(val)
-}
-
-// TLSClientHashSha256 returns an attribute KeyValue conforming to the
-// "tls.client.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the client. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSClientHashSha256(val string) attribute.KeyValue {
- return TLSClientHashSha256Key.String(val)
-}
-
-// TLSClientIssuer returns an attribute KeyValue conforming to the
-// "tls.client.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the client.
-func TLSClientIssuer(val string) attribute.KeyValue {
- return TLSClientIssuerKey.String(val)
-}
-
-// TLSClientJa3 returns an attribute KeyValue conforming to the
-// "tls.client.ja3" semantic conventions. It represents a hash that identifies
-// clients based on how they perform an SSL/TLS handshake.
-func TLSClientJa3(val string) attribute.KeyValue {
- return TLSClientJa3Key.String(val)
-}
-
-// TLSClientNotAfter returns an attribute KeyValue conforming to the
-// "tls.client.not_after" semantic conventions. It represents the date/Time
-// indicating when client certificate is no longer considered valid.
-func TLSClientNotAfter(val string) attribute.KeyValue {
- return TLSClientNotAfterKey.String(val)
-}
-
-// TLSClientNotBefore returns an attribute KeyValue conforming to the
-// "tls.client.not_before" semantic conventions. It represents the date/Time
-// indicating when client certificate is first considered valid.
-func TLSClientNotBefore(val string) attribute.KeyValue {
- return TLSClientNotBeforeKey.String(val)
-}
-
-// TLSClientServerName returns an attribute KeyValue conforming to the
-// "tls.client.server_name" semantic conventions. It represents the also called
-// an SNI, this tells the server which hostname to which the client is
-// attempting to connect to.
-func TLSClientServerName(val string) attribute.KeyValue {
- return TLSClientServerNameKey.String(val)
-}
-
-// TLSClientSubject returns an attribute KeyValue conforming to the
-// "tls.client.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the client.
-func TLSClientSubject(val string) attribute.KeyValue {
- return TLSClientSubjectKey.String(val)
-}
-
-// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
-// "tls.client.supported_ciphers" semantic conventions. It represents the array
-// of ciphers offered by the client during the client hello.
-func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
- return TLSClientSupportedCiphersKey.StringSlice(val)
-}
-
-// TLSCurve returns an attribute KeyValue conforming to the "tls.curve"
-// semantic conventions. It represents the string indicating the curve used for
-// the given cipher, when applicable
-func TLSCurve(val string) attribute.KeyValue {
- return TLSCurveKey.String(val)
-}
-
-// TLSEstablished returns an attribute KeyValue conforming to the
-// "tls.established" semantic conventions. It represents the boolean flag
-// indicating if the TLS negotiation was successful and transitioned to an
-// encrypted tunnel.
-func TLSEstablished(val bool) attribute.KeyValue {
- return TLSEstablishedKey.Bool(val)
-}
-
-// TLSNextProtocol returns an attribute KeyValue conforming to the
-// "tls.next_protocol" semantic conventions. It represents the string
-// indicating the protocol being tunneled. Per the values in the [IANA
-// registry](https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids),
-// this string should be lower case.
-func TLSNextProtocol(val string) attribute.KeyValue {
- return TLSNextProtocolKey.String(val)
-}
-
-// TLSProtocolVersion returns an attribute KeyValue conforming to the
-// "tls.protocol.version" semantic conventions. It represents the numeric part
-// of the version parsed from the original string of the negotiated [SSL/TLS
-// protocol
-// version](https://www.openssl.org/docs/man1.1.1/man3/SSL_get_version.html#RETURN-VALUES)
-func TLSProtocolVersion(val string) attribute.KeyValue {
- return TLSProtocolVersionKey.String(val)
-}
-
-// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
-// semantic conventions. It represents the boolean flag indicating if this TLS
-// connection was resumed from an existing TLS negotiation.
-func TLSResumed(val bool) attribute.KeyValue {
- return TLSResumedKey.Bool(val)
-}
-
-// TLSServerCertificate returns an attribute KeyValue conforming to the
-// "tls.server.certificate" semantic conventions. It represents the pEM-encoded
-// stand-alone certificate offered by the server. This is usually
-// mutually-exclusive of `server.certificate_chain` since this value also
-// exists in that list.
-func TLSServerCertificate(val string) attribute.KeyValue {
- return TLSServerCertificateKey.String(val)
-}
-
-// TLSServerCertificateChain returns an attribute KeyValue conforming to the
-// "tls.server.certificate_chain" semantic conventions. It represents the array
-// of PEM-encoded certificates that make up the certificate chain offered by
-// the server. This is usually mutually-exclusive of `server.certificate` since
-// that value should be the first certificate in the chain.
-func TLSServerCertificateChain(val ...string) attribute.KeyValue {
- return TLSServerCertificateChainKey.StringSlice(val)
-}
-
-// TLSServerHashMd5 returns an attribute KeyValue conforming to the
-// "tls.server.hash.md5" semantic conventions. It represents the certificate
-// fingerprint using the MD5 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashMd5(val string) attribute.KeyValue {
- return TLSServerHashMd5Key.String(val)
-}
-
-// TLSServerHashSha1 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha1" semantic conventions. It represents the certificate
-// fingerprint using the SHA1 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha1(val string) attribute.KeyValue {
- return TLSServerHashSha1Key.String(val)
-}
-
-// TLSServerHashSha256 returns an attribute KeyValue conforming to the
-// "tls.server.hash.sha256" semantic conventions. It represents the certificate
-// fingerprint using the SHA256 digest of DER-encoded version of certificate
-// offered by the server. For consistency with other hash values, this value
-// should be formatted as an uppercase hash.
-func TLSServerHashSha256(val string) attribute.KeyValue {
- return TLSServerHashSha256Key.String(val)
-}
-
-// TLSServerIssuer returns an attribute KeyValue conforming to the
-// "tls.server.issuer" semantic conventions. It represents the distinguished
-// name of
-// [subject](https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6) of
-// the issuer of the x.509 certificate presented by the server.
-func TLSServerIssuer(val string) attribute.KeyValue {
- return TLSServerIssuerKey.String(val)
-}
-
-// TLSServerJa3s returns an attribute KeyValue conforming to the
-// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
-// servers based on how they perform an SSL/TLS handshake.
-func TLSServerJa3s(val string) attribute.KeyValue {
- return TLSServerJa3sKey.String(val)
-}
-
-// TLSServerNotAfter returns an attribute KeyValue conforming to the
-// "tls.server.not_after" semantic conventions. It represents the date/Time
-// indicating when server certificate is no longer considered valid.
-func TLSServerNotAfter(val string) attribute.KeyValue {
- return TLSServerNotAfterKey.String(val)
-}
-
-// TLSServerNotBefore returns an attribute KeyValue conforming to the
-// "tls.server.not_before" semantic conventions. It represents the date/Time
-// indicating when server certificate is first considered valid.
-func TLSServerNotBefore(val string) attribute.KeyValue {
- return TLSServerNotBeforeKey.String(val)
-}
-
-// TLSServerSubject returns an attribute KeyValue conforming to the
-// "tls.server.subject" semantic conventions. It represents the distinguished
-// name of subject of the x.509 certificate presented by the server.
-func TLSServerSubject(val string) attribute.KeyValue {
- return TLSServerSubjectKey.String(val)
-}
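
To show how the TLS attribute constructors above are typically consumed, here is a hedged sketch that records a few negotiated-connection details on an existing span; the function, parameter names, and literal values are illustrative assumptions rather than anything prescribed by these conventions:

```go
package example

import (
	semconv "go.opentelemetry.io/otel/semconv/v1.24.0"
	"go.opentelemetry.io/otel/trace"
)

// recordTLS attaches basic TLS negotiation details to a span.
// The cipher and protocol version literals stand in for whatever the
// connection actually reports.
func recordTLS(span trace.Span, established bool) {
	span.SetAttributes(
		semconv.TLSEstablished(established),
		semconv.TLSProtocolVersion("1.3"),
		semconv.TLSCipher("TLS_AES_128_GCM_SHA256"),
	)
}
```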
-
-// Attributes describing URL.
-const (
- // URLDomainKey is the attribute Key conforming to the "url.domain"
- // semantic conventions. It represents the domain extracted from the
- // `url.full`, such as "opentelemetry.io".
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'www.foo.bar', 'opentelemetry.io', '3.12.167.2',
- // '[1080:0:0:0:8:800:200C:417A]'
- // Note: In some cases a URL may refer to an IP and/or port directly,
- // without a domain name. In this case, the IP address would go to the
- // domain field. If the URL contains a [literal IPv6
- // address](https://www.rfc-editor.org/rfc/rfc2732#section-2) enclosed by
- // `[` and `]`, the `[` and `]` characters should also be captured in the
- // domain field.
- URLDomainKey = attribute.Key("url.domain")
-
- // URLExtensionKey is the attribute Key conforming to the "url.extension"
- // semantic conventions. It represents the file extension extracted from
- // the `url.full`, excluding the leading dot.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'png', 'gz'
- // Note: The file extension is only set if it exists, as not every url has
- // a file extension. When the file name has multiple extensions
- // `example.tar.gz`, only the last one should be captured `gz`, not
- // `tar.gz`.
- URLExtensionKey = attribute.Key("url.extension")
-
- // URLFragmentKey is the attribute Key conforming to the "url.fragment"
- // semantic conventions. It represents the [URI
- // fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'SemConv'
- URLFragmentKey = attribute.Key("url.fragment")
-
- // URLFullKey is the attribute Key conforming to the "url.full" semantic
- // conventions. It represents the absolute URL describing a network
- // resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // '//localhost'
- // Note: For network calls, URL usually has
- // `scheme://host[:port][path][?query][#fragment]` format, where the
- // fragment is not transmitted over HTTP, but if it is known, it SHOULD be
- // included nevertheless.
- // `url.full` MUST NOT contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case username and
- // password SHOULD be redacted and attribute's value SHOULD be
- // `https://REDACTED:REDACTED@www.example.com/`.
- // `url.full` SHOULD capture the absolute URL when it is available (or can
- // be reconstructed). Sensitive content provided in `url.full` SHOULD be
- // scrubbed when instrumentations can identify it.
- URLFullKey = attribute.Key("url.full")
-
- // URLOriginalKey is the attribute Key conforming to the "url.original"
- // semantic conventions. It represents the unmodified original URL as seen
- // in the event source.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv',
- // 'search?q=OpenTelemetry'
- // Note: In network monitoring, the observed URL may be a full URL, whereas
- // in access logs, the URL is often just represented as a path. This field
- // is meant to represent the URL as it was observed, complete or not.
- // `url.original` might contain credentials passed via URL in form of
- // `https://username:password@www.example.com/`. In such case password and
- // username SHOULD NOT be redacted and attribute's value SHOULD remain the
- // same.
- URLOriginalKey = attribute.Key("url.original")
-
- // URLPathKey is the attribute Key conforming to the "url.path" semantic
- // conventions. It represents the [URI
- // path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: '/search'
- // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLPathKey = attribute.Key("url.path")
-
- // URLPortKey is the attribute Key conforming to the "url.port" semantic
- // conventions. It represents the port extracted from the `url.full`
- //
- // Type: int
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 443
- URLPortKey = attribute.Key("url.port")
-
- // URLQueryKey is the attribute Key conforming to the "url.query" semantic
- // conventions. It represents the [URI
- // query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'q=OpenTelemetry'
- // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
- // instrumentations can identify it.
- URLQueryKey = attribute.Key("url.query")
-
- // URLRegisteredDomainKey is the attribute Key conforming to the
- // "url.registered_domain" semantic conventions. It represents the highest
- // registered url domain, stripped of the subdomain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'example.com', 'foo.co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org). For example, the registered domain for
- // `foo.example.com` is `example.com`. Trying to approximate this by simply
- // taking the last two labels will not work well for TLDs such as `co.uk`.
- URLRegisteredDomainKey = attribute.Key("url.registered_domain")
-
- // URLSchemeKey is the attribute Key conforming to the "url.scheme"
- // semantic conventions. It represents the [URI
- // scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
- // identifying the used protocol.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'https', 'ftp', 'telnet'
- URLSchemeKey = attribute.Key("url.scheme")
-
- // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
- // semantic conventions. It represents the subdomain portion of a fully
- // qualified domain name includes all of the names except the host name
- // under the registered_domain. In a partially qualified domain, or if the
- // qualification level of the full name cannot be determined, subdomain
- // contains all of the names below the registered domain.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'east', 'sub2.sub1'
- // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If
- // the domain has multiple levels of subdomain, such as
- // `sub2.sub1.example.com`, the subdomain field should contain `sub2.sub1`,
- // with no trailing period.
- URLSubdomainKey = attribute.Key("url.subdomain")
-
- // URLTemplateKey is the attribute Key conforming to the "url.template"
- // semantic conventions. It represents the low-cardinality template of an
- // [absolute path
- // reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '/users/{id}', '/users/:id', '/users?id={id}'
- URLTemplateKey = attribute.Key("url.template")
-
- // URLTopLevelDomainKey is the attribute Key conforming to the
- // "url.top_level_domain" semantic conventions. It represents the effective
- // top level domain (eTLD), also known as the domain suffix, is the last
- // part of the domain name. For example, the top level domain for
- // example.com is `com`.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'com', 'co.uk'
- // Note: This value can be determined precisely with the [public suffix
- // list](http://publicsuffix.org).
- URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
-)
-
-// URLDomain returns an attribute KeyValue conforming to the "url.domain"
-// semantic conventions. It represents the domain extracted from the
-// `url.full`, such as "opentelemetry.io".
-func URLDomain(val string) attribute.KeyValue {
- return URLDomainKey.String(val)
-}
-
-// URLExtension returns an attribute KeyValue conforming to the
-// "url.extension" semantic conventions. It represents the file extension
-// extracted from the `url.full`, excluding the leading dot.
-func URLExtension(val string) attribute.KeyValue {
- return URLExtensionKey.String(val)
-}
-
-// URLFragment returns an attribute KeyValue conforming to the
-// "url.fragment" semantic conventions. It represents the [URI
-// fragment](https://www.rfc-editor.org/rfc/rfc3986#section-3.5) component
-func URLFragment(val string) attribute.KeyValue {
- return URLFragmentKey.String(val)
-}
-
-// URLFull returns an attribute KeyValue conforming to the "url.full"
-// semantic conventions. It represents the absolute URL describing a network
-// resource according to [RFC3986](https://www.rfc-editor.org/rfc/rfc3986)
-func URLFull(val string) attribute.KeyValue {
- return URLFullKey.String(val)
-}
-
-// URLOriginal returns an attribute KeyValue conforming to the
-// "url.original" semantic conventions. It represents the unmodified original
-// URL as seen in the event source.
-func URLOriginal(val string) attribute.KeyValue {
- return URLOriginalKey.String(val)
-}
-
-// URLPath returns an attribute KeyValue conforming to the "url.path"
-// semantic conventions. It represents the [URI
-// path](https://www.rfc-editor.org/rfc/rfc3986#section-3.3) component
-func URLPath(val string) attribute.KeyValue {
- return URLPathKey.String(val)
-}
-
-// URLPort returns an attribute KeyValue conforming to the "url.port"
-// semantic conventions. It represents the port extracted from the `url.full`
-func URLPort(val int) attribute.KeyValue {
- return URLPortKey.Int(val)
-}
-
-// URLQuery returns an attribute KeyValue conforming to the "url.query"
-// semantic conventions. It represents the [URI
-// query](https://www.rfc-editor.org/rfc/rfc3986#section-3.4) component
-func URLQuery(val string) attribute.KeyValue {
- return URLQueryKey.String(val)
-}
-
-// URLRegisteredDomain returns an attribute KeyValue conforming to the
-// "url.registered_domain" semantic conventions. It represents the highest
-// registered url domain, stripped of the subdomain.
-func URLRegisteredDomain(val string) attribute.KeyValue {
- return URLRegisteredDomainKey.String(val)
-}
-
-// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
-// semantic conventions. It represents the [URI
-// scheme](https://www.rfc-editor.org/rfc/rfc3986#section-3.1) component
-// identifying the used protocol.
-func URLScheme(val string) attribute.KeyValue {
- return URLSchemeKey.String(val)
-}
-
-// URLSubdomain returns an attribute KeyValue conforming to the
-// "url.subdomain" semantic conventions. It represents the subdomain portion of
-// a fully qualified domain name includes all of the names except the host name
-// under the registered_domain. In a partially qualified domain, or if the
-// qualification level of the full name cannot be determined, subdomain
-// contains all of the names below the registered domain.
-func URLSubdomain(val string) attribute.KeyValue {
- return URLSubdomainKey.String(val)
-}
-
-// URLTemplate returns an attribute KeyValue conforming to the
-// "url.template" semantic conventions. It represents the low-cardinality
-// template of an [absolute path
-// reference](https://www.rfc-editor.org/rfc/rfc3986#section-4.2).
-func URLTemplate(val string) attribute.KeyValue {
- return URLTemplateKey.String(val)
-}
-
-// URLTopLevelDomain returns an attribute KeyValue conforming to the
-// "url.top_level_domain" semantic conventions. It represents the effective top
-// level domain (eTLD), also known as the domain suffix, is the last part of
-// the domain name. For example, the top level domain for example.com is `com`.
-func URLTopLevelDomain(val string) attribute.KeyValue {
- return URLTopLevelDomainKey.String(val)
-}
-
-// Describes user-agent attributes.
-const (
- // UserAgentNameKey is the attribute Key conforming to the
- // "user_agent.name" semantic conventions. It represents the name of the
- // user-agent extracted from original. Usually refers to the browser's
- // name.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'Safari', 'YourApp'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's name
- // from original string. In the case of using a user-agent for non-browser
- // products, such as microservices with multiple names/versions inside the
- // `user_agent.original`, the most significant name SHOULD be selected. In
- // such a scenario it should align with `user_agent.version`
- UserAgentNameKey = attribute.Key("user_agent.name")
-
- // UserAgentOriginalKey is the attribute Key conforming to the
- // "user_agent.original" semantic conventions. It represents the value of
- // the [HTTP
- // User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
- // header sent by the client.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: stable
- // Examples: 'CERN-LineMode/2.15 libwww/2.17b3', 'Mozilla/5.0 (iPhone; CPU
- // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
- // Version/14.1.2 Mobile/15E148 Safari/604.1', 'YourApp/1.0.0
- // grpc-java-okhttp/1.27.2'
- UserAgentOriginalKey = attribute.Key("user_agent.original")
-
- // UserAgentVersionKey is the attribute Key conforming to the
- // "user_agent.version" semantic conventions. It represents the version of
- // the user-agent extracted from original. Usually refers to the browser's
- // version
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '14.1.2', '1.0.0'
- // Note: [Example](https://www.whatsmyua.info) of extracting browser's
- // version from original string. In the case of using a user-agent for
- // non-browser products, such as microservices with multiple names/versions
- // inside the `user_agent.original`, the most significant version SHOULD be
- // selected. In such a scenario it should align with `user_agent.name`
- UserAgentVersionKey = attribute.Key("user_agent.version")
-)
-
-// UserAgentName returns an attribute KeyValue conforming to the
-// "user_agent.name" semantic conventions. It represents the name of the
-// user-agent extracted from original. Usually refers to the browser's name.
-func UserAgentName(val string) attribute.KeyValue {
- return UserAgentNameKey.String(val)
-}
-
-// UserAgentOriginal returns an attribute KeyValue conforming to the
-// "user_agent.original" semantic conventions. It represents the value of the
-// [HTTP
-// User-Agent](https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent)
-// header sent by the client.
-func UserAgentOriginal(val string) attribute.KeyValue {
- return UserAgentOriginalKey.String(val)
-}
-
-// UserAgentVersion returns an attribute KeyValue conforming to the
-// "user_agent.version" semantic conventions. It represents the version of the
-// user-agent extracted from original. Usually refers to the browser's version
-func UserAgentVersion(val string) attribute.KeyValue {
- return UserAgentVersionKey.String(val)
-}
-
-// The attributes used to describe the packaged software running the
-// application code.
-const (
- // WebEngineDescriptionKey is the attribute Key conforming to the
- // "webengine.description" semantic conventions. It represents the
- // additional description of the web engine (e.g. detailed version and
- // edition information).
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
- // 2.2.2.Final'
- WebEngineDescriptionKey = attribute.Key("webengine.description")
-
- // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
- // semantic conventions. It represents the name of the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: 'WildFly'
- WebEngineNameKey = attribute.Key("webengine.name")
-
- // WebEngineVersionKey is the attribute Key conforming to the
- // "webengine.version" semantic conventions. It represents the version of
- // the web engine.
- //
- // Type: string
- // RequirementLevel: Optional
- // Stability: experimental
- // Examples: '21.0.0'
- WebEngineVersionKey = attribute.Key("webengine.version")
-)
-
-// WebEngineDescription returns an attribute KeyValue conforming to the
-// "webengine.description" semantic conventions. It represents the additional
-// description of the web engine (e.g. detailed version and edition
-// information).
-func WebEngineDescription(val string) attribute.KeyValue {
- return WebEngineDescriptionKey.String(val)
-}
-
-// WebEngineName returns an attribute KeyValue conforming to the
-// "webengine.name" semantic conventions. It represents the name of the web
-// engine.
-func WebEngineName(val string) attribute.KeyValue {
- return WebEngineNameKey.String(val)
-}
-
-// WebEngineVersion returns an attribute KeyValue conforming to the
-// "webengine.version" semantic conventions. It represents the version of the
-// web engine.
-func WebEngineVersion(val string) attribute.KeyValue {
- return WebEngineVersionKey.String(val)
-}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
deleted file mode 100644
index fcdb9f485..000000000
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/metric.go
+++ /dev/null
@@ -1,1307 +0,0 @@
-// Copyright The OpenTelemetry Authors
-// SPDX-License-Identifier: Apache-2.0
-
-// Code generated from semantic convention specification. DO NOT EDIT.
-
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
-
-const (
-
- // ContainerCPUTime is the metric conforming to the "container.cpu.time"
- // semantic conventions. It represents the total CPU time consumed.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ContainerCPUTimeName = "container.cpu.time"
- ContainerCPUTimeUnit = "s"
- ContainerCPUTimeDescription = "Total CPU time consumed"
-
- // ContainerMemoryUsage is the metric conforming to the
- // "container.memory.usage" semantic conventions. It represents the memory
- // usage of the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerMemoryUsageName = "container.memory.usage"
- ContainerMemoryUsageUnit = "By"
- ContainerMemoryUsageDescription = "Memory usage of the container."
-
- // ContainerDiskIo is the metric conforming to the "container.disk.io" semantic
- // conventions. It represents the disk bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerDiskIoName = "container.disk.io"
- ContainerDiskIoUnit = "By"
- ContainerDiskIoDescription = "Disk bytes for the container."
-
- // ContainerNetworkIo is the metric conforming to the "container.network.io"
- // semantic conventions. It represents the network bytes for the container.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ContainerNetworkIoName = "container.network.io"
- ContainerNetworkIoUnit = "By"
- ContainerNetworkIoDescription = "Network bytes for the container."
-
- // DBClientOperationDuration is the metric conforming to the
- // "db.client.operation.duration" semantic conventions. It represents the
- // duration of database client operations.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientOperationDurationName = "db.client.operation.duration"
- DBClientOperationDurationUnit = "s"
- DBClientOperationDurationDescription = "Duration of database client operations."
-
- // DBClientConnectionCount is the metric conforming to the
- // "db.client.connection.count" semantic conventions. It represents the number
- // of connections that are currently in state described by the `state`
- // attribute.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionCountName = "db.client.connection.count"
- DBClientConnectionCountUnit = "{connection}"
- DBClientConnectionCountDescription = "The number of connections that are currently in state described by the `state` attribute"
-
- // DBClientConnectionIdleMax is the metric conforming to the
- // "db.client.connection.idle.max" semantic conventions. It represents the
- // maximum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMaxName = "db.client.connection.idle.max"
- DBClientConnectionIdleMaxUnit = "{connection}"
- DBClientConnectionIdleMaxDescription = "The maximum number of idle open connections allowed"
-
- // DBClientConnectionIdleMin is the metric conforming to the
- // "db.client.connection.idle.min" semantic conventions. It represents the
- // minimum number of idle open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionIdleMinName = "db.client.connection.idle.min"
- DBClientConnectionIdleMinUnit = "{connection}"
- DBClientConnectionIdleMinDescription = "The minimum number of idle open connections allowed"
-
- // DBClientConnectionMax is the metric conforming to the
- // "db.client.connection.max" semantic conventions. It represents the maximum
- // number of open connections allowed.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionMaxName = "db.client.connection.max"
- DBClientConnectionMaxUnit = "{connection}"
- DBClientConnectionMaxDescription = "The maximum number of open connections allowed"
-
- // DBClientConnectionPendingRequests is the metric conforming to the
- // "db.client.connection.pending_requests" semantic conventions. It represents
- // the number of pending requests for an open connection, cumulative for the
- // entire pool.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionPendingRequestsName = "db.client.connection.pending_requests"
- DBClientConnectionPendingRequestsUnit = "{request}"
- DBClientConnectionPendingRequestsDescription = "The number of pending requests for an open connection, cumulative for the entire pool"
-
- // DBClientConnectionTimeouts is the metric conforming to the
- // "db.client.connection.timeouts" semantic conventions. It represents the
- // number of connection timeouts that have occurred trying to obtain a
- // connection from the pool.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionTimeoutsName = "db.client.connection.timeouts"
- DBClientConnectionTimeoutsUnit = "{timeout}"
- DBClientConnectionTimeoutsDescription = "The number of connection timeouts that have occurred trying to obtain a connection from the pool"
-
- // DBClientConnectionCreateTime is the metric conforming to the
- // "db.client.connection.create_time" semantic conventions. It represents the
- // time it took to create a new connection.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionCreateTimeName = "db.client.connection.create_time"
- DBClientConnectionCreateTimeUnit = "s"
- DBClientConnectionCreateTimeDescription = "The time it took to create a new connection"
-
- // DBClientConnectionWaitTime is the metric conforming to the
- // "db.client.connection.wait_time" semantic conventions. It represents the
- // time it took to obtain an open connection from the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionWaitTimeName = "db.client.connection.wait_time"
- DBClientConnectionWaitTimeUnit = "s"
- DBClientConnectionWaitTimeDescription = "The time it took to obtain an open connection from the pool"
-
- // DBClientConnectionUseTime is the metric conforming to the
- // "db.client.connection.use_time" semantic conventions. It represents the time
- // between borrowing a connection and returning it to the pool.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DBClientConnectionUseTimeName = "db.client.connection.use_time"
- DBClientConnectionUseTimeUnit = "s"
- DBClientConnectionUseTimeDescription = "The time between borrowing a connection and returning it to the pool"
-
- // DBClientConnectionsUsage is the metric conforming to the
- // "db.client.connections.usage" semantic conventions. It represents the
- // deprecated, use `db.client.connection.count` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsUsageName = "db.client.connections.usage"
- DBClientConnectionsUsageUnit = "{connection}"
- DBClientConnectionsUsageDescription = "Deprecated, use `db.client.connection.count` instead."
-
- // DBClientConnectionsIdleMax is the metric conforming to the
- // "db.client.connections.idle.max" semantic conventions. It represents the
- // deprecated, use `db.client.connection.idle.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMaxName = "db.client.connections.idle.max"
- DBClientConnectionsIdleMaxUnit = "{connection}"
- DBClientConnectionsIdleMaxDescription = "Deprecated, use `db.client.connection.idle.max` instead."
-
- // DBClientConnectionsIdleMin is the metric conforming to the
- // "db.client.connections.idle.min" semantic conventions. It represents the
- // deprecated, use `db.client.connection.idle.min` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsIdleMinName = "db.client.connections.idle.min"
- DBClientConnectionsIdleMinUnit = "{connection}"
- DBClientConnectionsIdleMinDescription = "Deprecated, use `db.client.connection.idle.min` instead."
-
- // DBClientConnectionsMax is the metric conforming to the
- // "db.client.connections.max" semantic conventions. It represents the
- // deprecated, use `db.client.connection.max` instead.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- DBClientConnectionsMaxName = "db.client.connections.max"
- DBClientConnectionsMaxUnit = "{connection}"
- DBClientConnectionsMaxDescription = "Deprecated, use `db.client.connection.max` instead."
-
- // DBClientConnectionsPendingRequests is the metric conforming to the
- // "db.client.connections.pending_requests" semantic conventions. It represents
- // the deprecated, use `db.client.connection.pending_requests` instead.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- DBClientConnectionsPendingRequestsName = "db.client.connections.pending_requests"
- DBClientConnectionsPendingRequestsUnit = "{request}"
- DBClientConnectionsPendingRequestsDescription = "Deprecated, use `db.client.connection.pending_requests` instead."
-
- // DBClientConnectionsTimeouts is the metric conforming to the
- // "db.client.connections.timeouts" semantic conventions. It represents the
- // deprecated, use `db.client.connection.timeouts` instead.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- DBClientConnectionsTimeoutsName = "db.client.connections.timeouts"
- DBClientConnectionsTimeoutsUnit = "{timeout}"
- DBClientConnectionsTimeoutsDescription = "Deprecated, use `db.client.connection.timeouts` instead."
-
- // DBClientConnectionsCreateTime is the metric conforming to the
- // "db.client.connections.create_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.create_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsCreateTimeName = "db.client.connections.create_time"
- DBClientConnectionsCreateTimeUnit = "ms"
- DBClientConnectionsCreateTimeDescription = "Deprecated, use `db.client.connection.create_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsWaitTime is the metric conforming to the
- // "db.client.connections.wait_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.wait_time` instead. Note: the unit
- // also changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsWaitTimeName = "db.client.connections.wait_time"
- DBClientConnectionsWaitTimeUnit = "ms"
- DBClientConnectionsWaitTimeDescription = "Deprecated, use `db.client.connection.wait_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DBClientConnectionsUseTime is the metric conforming to the
- // "db.client.connections.use_time" semantic conventions. It represents the
- // deprecated, use `db.client.connection.use_time` instead. Note: the unit also
- // changed from `ms` to `s`.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- DBClientConnectionsUseTimeName = "db.client.connections.use_time"
- DBClientConnectionsUseTimeUnit = "ms"
- DBClientConnectionsUseTimeDescription = "Deprecated, use `db.client.connection.use_time` instead. Note: the unit also changed from `ms` to `s`."
-
- // DNSLookupDuration is the metric conforming to the "dns.lookup.duration"
- // semantic conventions. It represents the measures the time taken to perform a
- // DNS lookup.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- DNSLookupDurationName = "dns.lookup.duration"
- DNSLookupDurationUnit = "s"
- DNSLookupDurationDescription = "Measures the time taken to perform a DNS lookup."
-
- // AspnetcoreRoutingMatchAttempts is the metric conforming to the
- // "aspnetcore.routing.match_attempts" semantic conventions. It represents the
- // number of requests that were attempted to be matched to an endpoint.
- // Instrument: counter
- // Unit: {match_attempt}
- // Stability: Stable
- AspnetcoreRoutingMatchAttemptsName = "aspnetcore.routing.match_attempts"
- AspnetcoreRoutingMatchAttemptsUnit = "{match_attempt}"
- AspnetcoreRoutingMatchAttemptsDescription = "Number of requests that were attempted to be matched to an endpoint."
-
- // AspnetcoreDiagnosticsExceptions is the metric conforming to the
- // "aspnetcore.diagnostics.exceptions" semantic conventions. It represents the
- // number of exceptions caught by exception handling middleware.
- // Instrument: counter
- // Unit: {exception}
- // Stability: Stable
- AspnetcoreDiagnosticsExceptionsName = "aspnetcore.diagnostics.exceptions"
- AspnetcoreDiagnosticsExceptionsUnit = "{exception}"
- AspnetcoreDiagnosticsExceptionsDescription = "Number of exceptions caught by exception handling middleware."
-
- // AspnetcoreRateLimitingActiveRequestLeases is the metric conforming to the
- // "aspnetcore.rate_limiting.active_request_leases" semantic conventions. It
- // represents the number of requests that are currently active on the server
- // that hold a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingActiveRequestLeasesName = "aspnetcore.rate_limiting.active_request_leases"
- AspnetcoreRateLimitingActiveRequestLeasesUnit = "{request}"
- AspnetcoreRateLimitingActiveRequestLeasesDescription = "Number of requests that are currently active on the server that hold a rate limiting lease."
-
- // AspnetcoreRateLimitingRequestLeaseDuration is the metric conforming to the
- // "aspnetcore.rate_limiting.request_lease.duration" semantic conventions. It
- // represents the duration of rate limiting lease held by requests on the
- // server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestLeaseDurationName = "aspnetcore.rate_limiting.request_lease.duration"
- AspnetcoreRateLimitingRequestLeaseDurationUnit = "s"
- AspnetcoreRateLimitingRequestLeaseDurationDescription = "The duration of rate limiting lease held by requests on the server."
-
- // AspnetcoreRateLimitingRequestTimeInQueue is the metric conforming to the
- // "aspnetcore.rate_limiting.request.time_in_queue" semantic conventions. It
- // represents the time the request spent in a queue waiting to acquire a rate
- // limiting lease.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- AspnetcoreRateLimitingRequestTimeInQueueName = "aspnetcore.rate_limiting.request.time_in_queue"
- AspnetcoreRateLimitingRequestTimeInQueueUnit = "s"
- AspnetcoreRateLimitingRequestTimeInQueueDescription = "The time the request spent in a queue waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingQueuedRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.queued_requests" semantic conventions. It
- // represents the number of requests that are currently queued, waiting to
- // acquire a rate limiting lease.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingQueuedRequestsName = "aspnetcore.rate_limiting.queued_requests"
- AspnetcoreRateLimitingQueuedRequestsUnit = "{request}"
- AspnetcoreRateLimitingQueuedRequestsDescription = "Number of requests that are currently queued, waiting to acquire a rate limiting lease."
-
- // AspnetcoreRateLimitingRequests is the metric conforming to the
- // "aspnetcore.rate_limiting.requests" semantic conventions. It represents the
- // number of requests that tried to acquire a rate limiting lease.
- // Instrument: counter
- // Unit: {request}
- // Stability: Stable
- AspnetcoreRateLimitingRequestsName = "aspnetcore.rate_limiting.requests"
- AspnetcoreRateLimitingRequestsUnit = "{request}"
- AspnetcoreRateLimitingRequestsDescription = "Number of requests that tried to acquire a rate limiting lease."
-
- // KestrelActiveConnections is the metric conforming to the
- // "kestrel.active_connections" semantic conventions. It represents the number
- // of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelActiveConnectionsName = "kestrel.active_connections"
- KestrelActiveConnectionsUnit = "{connection}"
- KestrelActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // KestrelConnectionDuration is the metric conforming to the
- // "kestrel.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelConnectionDurationName = "kestrel.connection.duration"
- KestrelConnectionDurationUnit = "s"
- KestrelConnectionDurationDescription = "The duration of connections on the server."
-
- // KestrelRejectedConnections is the metric conforming to the
- // "kestrel.rejected_connections" semantic conventions. It represents the
- // number of connections rejected by the server.
- // Instrument: counter
- // Unit: {connection}
- // Stability: Stable
- KestrelRejectedConnectionsName = "kestrel.rejected_connections"
- KestrelRejectedConnectionsUnit = "{connection}"
- KestrelRejectedConnectionsDescription = "Number of connections rejected by the server."
-
- // KestrelQueuedConnections is the metric conforming to the
- // "kestrel.queued_connections" semantic conventions. It represents the number
- // of connections that are currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelQueuedConnectionsName = "kestrel.queued_connections"
- KestrelQueuedConnectionsUnit = "{connection}"
- KestrelQueuedConnectionsDescription = "Number of connections that are currently queued and are waiting to start."
-
- // KestrelQueuedRequests is the metric conforming to the
- // "kestrel.queued_requests" semantic conventions. It represents the number of
- // HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are
- // currently queued and are waiting to start.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Stable
- KestrelQueuedRequestsName = "kestrel.queued_requests"
- KestrelQueuedRequestsUnit = "{request}"
- KestrelQueuedRequestsDescription = "Number of HTTP requests on multiplexed connections (HTTP/2 and HTTP/3) that are currently queued and are waiting to start."
-
- // KestrelUpgradedConnections is the metric conforming to the
- // "kestrel.upgraded_connections" semantic conventions. It represents the
- // number of connections that are currently upgraded (WebSockets). .
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- KestrelUpgradedConnectionsName = "kestrel.upgraded_connections"
- KestrelUpgradedConnectionsUnit = "{connection}"
- KestrelUpgradedConnectionsDescription = "Number of connections that are currently upgraded (WebSockets). ."
-
- // KestrelTLSHandshakeDuration is the metric conforming to the
- // "kestrel.tls_handshake.duration" semantic conventions. It represents the
- // duration of TLS handshakes on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- KestrelTLSHandshakeDurationName = "kestrel.tls_handshake.duration"
- KestrelTLSHandshakeDurationUnit = "s"
- KestrelTLSHandshakeDurationDescription = "The duration of TLS handshakes on the server."
-
- // KestrelActiveTLSHandshakes is the metric conforming to the
- // "kestrel.active_tls_handshakes" semantic conventions. It represents the
- // number of TLS handshakes that are currently in progress on the server.
- // Instrument: updowncounter
- // Unit: {handshake}
- // Stability: Stable
- KestrelActiveTLSHandshakesName = "kestrel.active_tls_handshakes"
- KestrelActiveTLSHandshakesUnit = "{handshake}"
- KestrelActiveTLSHandshakesDescription = "Number of TLS handshakes that are currently in progress on the server."
-
- // SignalrServerConnectionDuration is the metric conforming to the
- // "signalr.server.connection.duration" semantic conventions. It represents the
- // duration of connections on the server.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- SignalrServerConnectionDurationName = "signalr.server.connection.duration"
- SignalrServerConnectionDurationUnit = "s"
- SignalrServerConnectionDurationDescription = "The duration of connections on the server."
-
- // SignalrServerActiveConnections is the metric conforming to the
- // "signalr.server.active_connections" semantic conventions. It represents the
- // number of connections that are currently active on the server.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Stable
- SignalrServerActiveConnectionsName = "signalr.server.active_connections"
- SignalrServerActiveConnectionsUnit = "{connection}"
- SignalrServerActiveConnectionsDescription = "Number of connections that are currently active on the server."
-
- // FaaSInvokeDuration is the metric conforming to the "faas.invoke_duration"
- // semantic conventions. It represents the measures the duration of the
- // function's logic execution.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInvokeDurationName = "faas.invoke_duration"
- FaaSInvokeDurationUnit = "s"
- FaaSInvokeDurationDescription = "Measures the duration of the function's logic execution"
-
- // FaaSInitDuration is the metric conforming to the "faas.init_duration"
- // semantic conventions. It represents the measures the duration of the
- // function's initialization, such as a cold start.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSInitDurationName = "faas.init_duration"
- FaaSInitDurationUnit = "s"
- FaaSInitDurationDescription = "Measures the duration of the function's initialization, such as a cold start"
-
- // FaaSColdstarts is the metric conforming to the "faas.coldstarts" semantic
- // conventions. It represents the number of invocation cold starts.
- // Instrument: counter
- // Unit: {coldstart}
- // Stability: Experimental
- FaaSColdstartsName = "faas.coldstarts"
- FaaSColdstartsUnit = "{coldstart}"
- FaaSColdstartsDescription = "Number of invocation cold starts"
-
- // FaaSErrors is the metric conforming to the "faas.errors" semantic
- // conventions. It represents the number of invocation errors.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- FaaSErrorsName = "faas.errors"
- FaaSErrorsUnit = "{error}"
- FaaSErrorsDescription = "Number of invocation errors"
-
- // FaaSInvocations is the metric conforming to the "faas.invocations" semantic
- // conventions. It represents the number of successful invocations.
- // Instrument: counter
- // Unit: {invocation}
- // Stability: Experimental
- FaaSInvocationsName = "faas.invocations"
- FaaSInvocationsUnit = "{invocation}"
- FaaSInvocationsDescription = "Number of successful invocations"
-
- // FaaSTimeouts is the metric conforming to the "faas.timeouts" semantic
- // conventions. It represents the number of invocation timeouts.
- // Instrument: counter
- // Unit: {timeout}
- // Stability: Experimental
- FaaSTimeoutsName = "faas.timeouts"
- FaaSTimeoutsUnit = "{timeout}"
- FaaSTimeoutsDescription = "Number of invocation timeouts"
-
- // FaaSMemUsage is the metric conforming to the "faas.mem_usage" semantic
- // conventions. It represents the distribution of max memory usage per
- // invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSMemUsageName = "faas.mem_usage"
- FaaSMemUsageUnit = "By"
- FaaSMemUsageDescription = "Distribution of max memory usage per invocation"
-
- // FaaSCPUUsage is the metric conforming to the "faas.cpu_usage" semantic
- // conventions. It represents the distribution of CPU usage per invocation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- FaaSCPUUsageName = "faas.cpu_usage"
- FaaSCPUUsageUnit = "s"
- FaaSCPUUsageDescription = "Distribution of CPU usage per invocation"
-
- // FaaSNetIo is the metric conforming to the "faas.net_io" semantic
- // conventions. It represents the distribution of net I/O usage per invocation.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- FaaSNetIoName = "faas.net_io"
- FaaSNetIoUnit = "By"
- FaaSNetIoDescription = "Distribution of net I/O usage per invocation"
-
- // HTTPServerRequestDuration is the metric conforming to the
- // "http.server.request.duration" semantic conventions. It represents the
- // duration of HTTP server requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPServerRequestDurationName = "http.server.request.duration"
- HTTPServerRequestDurationUnit = "s"
- HTTPServerRequestDurationDescription = "Duration of HTTP server requests."
-
- // HTTPServerActiveRequests is the metric conforming to the
- // "http.server.active_requests" semantic conventions. It represents the number
- // of active HTTP server requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPServerActiveRequestsName = "http.server.active_requests"
- HTTPServerActiveRequestsUnit = "{request}"
- HTTPServerActiveRequestsDescription = "Number of active HTTP server requests."
-
- // HTTPServerRequestBodySize is the metric conforming to the
- // "http.server.request.body.size" semantic conventions. It represents the size
- // of HTTP server request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerRequestBodySizeName = "http.server.request.body.size"
- HTTPServerRequestBodySizeUnit = "By"
- HTTPServerRequestBodySizeDescription = "Size of HTTP server request bodies."
-
- // HTTPServerResponseBodySize is the metric conforming to the
- // "http.server.response.body.size" semantic conventions. It represents the
- // size of HTTP server response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPServerResponseBodySizeName = "http.server.response.body.size"
- HTTPServerResponseBodySizeUnit = "By"
- HTTPServerResponseBodySizeDescription = "Size of HTTP server response bodies."
-
- // HTTPClientRequestDuration is the metric conforming to the
- // "http.client.request.duration" semantic conventions. It represents the
- // duration of HTTP client requests.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- HTTPClientRequestDurationName = "http.client.request.duration"
- HTTPClientRequestDurationUnit = "s"
- HTTPClientRequestDurationDescription = "Duration of HTTP client requests."
-
- // HTTPClientRequestBodySize is the metric conforming to the
- // "http.client.request.body.size" semantic conventions. It represents the size
- // of HTTP client request bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientRequestBodySizeName = "http.client.request.body.size"
- HTTPClientRequestBodySizeUnit = "By"
- HTTPClientRequestBodySizeDescription = "Size of HTTP client request bodies."
-
- // HTTPClientResponseBodySize is the metric conforming to the
- // "http.client.response.body.size" semantic conventions. It represents the
- // size of HTTP client response bodies.
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- HTTPClientResponseBodySizeName = "http.client.response.body.size"
- HTTPClientResponseBodySizeUnit = "By"
- HTTPClientResponseBodySizeDescription = "Size of HTTP client response bodies."
-
- // HTTPClientOpenConnections is the metric conforming to the
- // "http.client.open_connections" semantic conventions. It represents the
- // number of outbound HTTP connections that are currently active or idle on the
- // client.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- HTTPClientOpenConnectionsName = "http.client.open_connections"
- HTTPClientOpenConnectionsUnit = "{connection}"
- HTTPClientOpenConnectionsDescription = "Number of outbound HTTP connections that are currently active or idle on the client."
-
- // HTTPClientConnectionDuration is the metric conforming to the
- // "http.client.connection.duration" semantic conventions. It represents the
- // duration of the successfully established outbound HTTP connections.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- HTTPClientConnectionDurationName = "http.client.connection.duration"
- HTTPClientConnectionDurationUnit = "s"
- HTTPClientConnectionDurationDescription = "The duration of the successfully established outbound HTTP connections."
-
- // HTTPClientActiveRequests is the metric conforming to the
- // "http.client.active_requests" semantic conventions. It represents the number
- // of active HTTP requests.
- // Instrument: updowncounter
- // Unit: {request}
- // Stability: Experimental
- HTTPClientActiveRequestsName = "http.client.active_requests"
- HTTPClientActiveRequestsUnit = "{request}"
- HTTPClientActiveRequestsDescription = "Number of active HTTP requests."
-
- // JvmMemoryInit is the metric conforming to the "jvm.memory.init" semantic
- // conventions. It represents the measure of initial memory requested.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmMemoryInitName = "jvm.memory.init"
- JvmMemoryInitUnit = "By"
- JvmMemoryInitDescription = "Measure of initial memory requested."
-
- // JvmSystemCPUUtilization is the metric conforming to the
- // "jvm.system.cpu.utilization" semantic conventions. It represents the recent
- // CPU utilization for the whole system as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- JvmSystemCPUUtilizationName = "jvm.system.cpu.utilization"
- JvmSystemCPUUtilizationUnit = "1"
- JvmSystemCPUUtilizationDescription = "Recent CPU utilization for the whole system as reported by the JVM."
-
- // JvmSystemCPULoad1m is the metric conforming to the "jvm.system.cpu.load_1m"
- // semantic conventions. It represents the average CPU load of the whole system
- // for the last minute as reported by the JVM.
- // Instrument: gauge
- // Unit: {run_queue_item}
- // Stability: Experimental
- JvmSystemCPULoad1mName = "jvm.system.cpu.load_1m"
- JvmSystemCPULoad1mUnit = "{run_queue_item}"
- JvmSystemCPULoad1mDescription = "Average CPU load of the whole system for the last minute as reported by the JVM."
-
- // JvmBufferMemoryUsage is the metric conforming to the
- // "jvm.buffer.memory.usage" semantic conventions. It represents the measure of
- // memory used by buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryUsageName = "jvm.buffer.memory.usage"
- JvmBufferMemoryUsageUnit = "By"
- JvmBufferMemoryUsageDescription = "Measure of memory used by buffers."
-
- // JvmBufferMemoryLimit is the metric conforming to the
- // "jvm.buffer.memory.limit" semantic conventions. It represents the measure of
- // total memory capacity of buffers.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- JvmBufferMemoryLimitName = "jvm.buffer.memory.limit"
- JvmBufferMemoryLimitUnit = "By"
- JvmBufferMemoryLimitDescription = "Measure of total memory capacity of buffers."
-
- // JvmBufferCount is the metric conforming to the "jvm.buffer.count" semantic
- // conventions. It represents the number of buffers in the pool.
- // Instrument: updowncounter
- // Unit: {buffer}
- // Stability: Experimental
- JvmBufferCountName = "jvm.buffer.count"
- JvmBufferCountUnit = "{buffer}"
- JvmBufferCountDescription = "Number of buffers in the pool."
-
- // JvmMemoryUsed is the metric conforming to the "jvm.memory.used" semantic
- // conventions. It represents the measure of memory used.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedName = "jvm.memory.used"
- JvmMemoryUsedUnit = "By"
- JvmMemoryUsedDescription = "Measure of memory used."
-
- // JvmMemoryCommitted is the metric conforming to the "jvm.memory.committed"
- // semantic conventions. It represents the measure of memory committed.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryCommittedName = "jvm.memory.committed"
- JvmMemoryCommittedUnit = "By"
- JvmMemoryCommittedDescription = "Measure of memory committed."
-
- // JvmMemoryLimit is the metric conforming to the "jvm.memory.limit" semantic
- // conventions. It represents the measure of max obtainable memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryLimitName = "jvm.memory.limit"
- JvmMemoryLimitUnit = "By"
- JvmMemoryLimitDescription = "Measure of max obtainable memory."
-
- // JvmMemoryUsedAfterLastGc is the metric conforming to the
- // "jvm.memory.used_after_last_gc" semantic conventions. It represents the
- // measure of memory used, as measured after the most recent garbage collection
- // event on this pool.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Stable
- JvmMemoryUsedAfterLastGcName = "jvm.memory.used_after_last_gc"
- JvmMemoryUsedAfterLastGcUnit = "By"
- JvmMemoryUsedAfterLastGcDescription = "Measure of memory used, as measured after the most recent garbage collection event on this pool."
-
- // JvmGcDuration is the metric conforming to the "jvm.gc.duration" semantic
- // conventions. It represents the duration of JVM garbage collection actions.
- // Instrument: histogram
- // Unit: s
- // Stability: Stable
- JvmGcDurationName = "jvm.gc.duration"
- JvmGcDurationUnit = "s"
- JvmGcDurationDescription = "Duration of JVM garbage collection actions."
-
- // JvmThreadCount is the metric conforming to the "jvm.thread.count" semantic
- // conventions. It represents the number of executing platform threads.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Stable
- JvmThreadCountName = "jvm.thread.count"
- JvmThreadCountUnit = "{thread}"
- JvmThreadCountDescription = "Number of executing platform threads."
-
- // JvmClassLoaded is the metric conforming to the "jvm.class.loaded" semantic
- // conventions. It represents the number of classes loaded since JVM start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassLoadedName = "jvm.class.loaded"
- JvmClassLoadedUnit = "{class}"
- JvmClassLoadedDescription = "Number of classes loaded since JVM start."
-
- // JvmClassUnloaded is the metric conforming to the "jvm.class.unloaded"
- // semantic conventions. It represents the number of classes unloaded since JVM
- // start.
- // Instrument: counter
- // Unit: {class}
- // Stability: Stable
- JvmClassUnloadedName = "jvm.class.unloaded"
- JvmClassUnloadedUnit = "{class}"
- JvmClassUnloadedDescription = "Number of classes unloaded since JVM start."
-
- // JvmClassCount is the metric conforming to the "jvm.class.count" semantic
- // conventions. It represents the number of classes currently loaded.
- // Instrument: updowncounter
- // Unit: {class}
- // Stability: Stable
- JvmClassCountName = "jvm.class.count"
- JvmClassCountUnit = "{class}"
- JvmClassCountDescription = "Number of classes currently loaded."
-
- // JvmCPUCount is the metric conforming to the "jvm.cpu.count" semantic
- // conventions. It represents the number of processors available to the Java
- // virtual machine.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Stable
- JvmCPUCountName = "jvm.cpu.count"
- JvmCPUCountUnit = "{cpu}"
- JvmCPUCountDescription = "Number of processors available to the Java virtual machine."
-
- // JvmCPUTime is the metric conforming to the "jvm.cpu.time" semantic
- // conventions. It represents the cPU time used by the process as reported by
- // the JVM.
- // Instrument: counter
- // Unit: s
- // Stability: Stable
- JvmCPUTimeName = "jvm.cpu.time"
- JvmCPUTimeUnit = "s"
- JvmCPUTimeDescription = "CPU time used by the process as reported by the JVM."
-
- // JvmCPURecentUtilization is the metric conforming to the
- // "jvm.cpu.recent_utilization" semantic conventions. It represents the recent
- // CPU utilization for the process as reported by the JVM.
- // Instrument: gauge
- // Unit: 1
- // Stability: Stable
- JvmCPURecentUtilizationName = "jvm.cpu.recent_utilization"
- JvmCPURecentUtilizationUnit = "1"
- JvmCPURecentUtilizationDescription = "Recent CPU utilization for the process as reported by the JVM."
-
- // MessagingPublishDuration is the metric conforming to the
- // "messaging.publish.duration" semantic conventions. It represents the
- // measures the duration of publish operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingPublishDurationName = "messaging.publish.duration"
- MessagingPublishDurationUnit = "s"
- MessagingPublishDurationDescription = "Measures the duration of publish operation."
-
- // MessagingReceiveDuration is the metric conforming to the
- // "messaging.receive.duration" semantic conventions. It represents the
- // measures the duration of receive operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingReceiveDurationName = "messaging.receive.duration"
- MessagingReceiveDurationUnit = "s"
- MessagingReceiveDurationDescription = "Measures the duration of receive operation."
-
- // MessagingProcessDuration is the metric conforming to the
- // "messaging.process.duration" semantic conventions. It represents the
- // measures the duration of process operation.
- // Instrument: histogram
- // Unit: s
- // Stability: Experimental
- MessagingProcessDurationName = "messaging.process.duration"
- MessagingProcessDurationUnit = "s"
- MessagingProcessDurationDescription = "Measures the duration of process operation."
-
- // MessagingPublishMessages is the metric conforming to the
- // "messaging.publish.messages" semantic conventions. It represents the
- // measures the number of published messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingPublishMessagesName = "messaging.publish.messages"
- MessagingPublishMessagesUnit = "{message}"
- MessagingPublishMessagesDescription = "Measures the number of published messages."
-
- // MessagingReceiveMessages is the metric conforming to the
- // "messaging.receive.messages" semantic conventions. It represents the
- // measures the number of received messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingReceiveMessagesName = "messaging.receive.messages"
- MessagingReceiveMessagesUnit = "{message}"
- MessagingReceiveMessagesDescription = "Measures the number of received messages."
-
- // MessagingProcessMessages is the metric conforming to the
- // "messaging.process.messages" semantic conventions. It represents the
- // measures the number of processed messages.
- // Instrument: counter
- // Unit: {message}
- // Stability: Experimental
- MessagingProcessMessagesName = "messaging.process.messages"
- MessagingProcessMessagesUnit = "{message}"
- MessagingProcessMessagesDescription = "Measures the number of processed messages."
-
- // ProcessCPUTime is the metric conforming to the "process.cpu.time" semantic
- // conventions. It represents the total CPU seconds broken down by different
- // states.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- ProcessCPUTimeName = "process.cpu.time"
- ProcessCPUTimeUnit = "s"
- ProcessCPUTimeDescription = "Total CPU seconds broken down by different states."
-
- // ProcessCPUUtilization is the metric conforming to the
- // "process.cpu.utilization" semantic conventions. It represents the difference
- // in process.cpu.time since the last measurement, divided by the elapsed time
- // and number of CPUs available to the process.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- ProcessCPUUtilizationName = "process.cpu.utilization"
- ProcessCPUUtilizationUnit = "1"
- ProcessCPUUtilizationDescription = "Difference in process.cpu.time since the last measurement, divided by the elapsed time and number of CPUs available to the process."
-
- // ProcessMemoryUsage is the metric conforming to the "process.memory.usage"
- // semantic conventions. It represents the amount of physical memory in use.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryUsageName = "process.memory.usage"
- ProcessMemoryUsageUnit = "By"
- ProcessMemoryUsageDescription = "The amount of physical memory in use."
-
- // ProcessMemoryVirtual is the metric conforming to the
- // "process.memory.virtual" semantic conventions. It represents the amount of
- // committed virtual memory.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- ProcessMemoryVirtualName = "process.memory.virtual"
- ProcessMemoryVirtualUnit = "By"
- ProcessMemoryVirtualDescription = "The amount of committed virtual memory."
-
- // ProcessDiskIo is the metric conforming to the "process.disk.io" semantic
- // conventions. It represents the disk bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessDiskIoName = "process.disk.io"
- ProcessDiskIoUnit = "By"
- ProcessDiskIoDescription = "Disk bytes transferred."
-
- // ProcessNetworkIo is the metric conforming to the "process.network.io"
- // semantic conventions. It represents the network bytes transferred.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- ProcessNetworkIoName = "process.network.io"
- ProcessNetworkIoUnit = "By"
- ProcessNetworkIoDescription = "Network bytes transferred."
-
- // ProcessThreadCount is the metric conforming to the "process.thread.count"
- // semantic conventions. It represents the process threads count.
- // Instrument: updowncounter
- // Unit: {thread}
- // Stability: Experimental
- ProcessThreadCountName = "process.thread.count"
- ProcessThreadCountUnit = "{thread}"
- ProcessThreadCountDescription = "Process threads count."
-
- // ProcessOpenFileDescriptorCount is the metric conforming to the
- // "process.open_file_descriptor.count" semantic conventions. It represents the
- // number of file descriptors in use by the process.
- // Instrument: updowncounter
- // Unit: {count}
- // Stability: Experimental
- ProcessOpenFileDescriptorCountName = "process.open_file_descriptor.count"
- ProcessOpenFileDescriptorCountUnit = "{count}"
- ProcessOpenFileDescriptorCountDescription = "Number of file descriptors in use by the process."
-
- // ProcessContextSwitches is the metric conforming to the
- // "process.context_switches" semantic conventions. It represents the number of
- // times the process has been context switched.
- // Instrument: counter
- // Unit: {count}
- // Stability: Experimental
- ProcessContextSwitchesName = "process.context_switches"
- ProcessContextSwitchesUnit = "{count}"
- ProcessContextSwitchesDescription = "Number of times the process has been context switched."
-
- // ProcessPagingFaults is the metric conforming to the "process.paging.faults"
- // semantic conventions. It represents the number of page faults the process
- // has made.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- ProcessPagingFaultsName = "process.paging.faults"
- ProcessPagingFaultsUnit = "{fault}"
- ProcessPagingFaultsDescription = "Number of page faults the process has made."
-
- // RPCServerDuration is the metric conforming to the "rpc.server.duration"
- // semantic conventions. It represents the measures the duration of inbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCServerDurationName = "rpc.server.duration"
- RPCServerDurationUnit = "ms"
- RPCServerDurationDescription = "Measures the duration of inbound RPC."
-
- // RPCServerRequestSize is the metric conforming to the
- // "rpc.server.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerRequestSizeName = "rpc.server.request.size"
- RPCServerRequestSizeUnit = "By"
- RPCServerRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCServerResponseSize is the metric conforming to the
- // "rpc.server.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCServerResponseSizeName = "rpc.server.response.size"
- RPCServerResponseSizeUnit = "By"
- RPCServerResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCServerRequestsPerRPC is the metric conforming to the
- // "rpc.server.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerRequestsPerRPCName = "rpc.server.requests_per_rpc"
- RPCServerRequestsPerRPCUnit = "{count}"
- RPCServerRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCServerResponsesPerRPC is the metric conforming to the
- // "rpc.server.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCServerResponsesPerRPCName = "rpc.server.responses_per_rpc"
- RPCServerResponsesPerRPCUnit = "{count}"
- RPCServerResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // RPCClientDuration is the metric conforming to the "rpc.client.duration"
- // semantic conventions. It represents the measures the duration of outbound
- // RPC.
- // Instrument: histogram
- // Unit: ms
- // Stability: Experimental
- RPCClientDurationName = "rpc.client.duration"
- RPCClientDurationUnit = "ms"
- RPCClientDurationDescription = "Measures the duration of outbound RPC."
-
- // RPCClientRequestSize is the metric conforming to the
- // "rpc.client.request.size" semantic conventions. It represents the measures
- // the size of RPC request messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientRequestSizeName = "rpc.client.request.size"
- RPCClientRequestSizeUnit = "By"
- RPCClientRequestSizeDescription = "Measures the size of RPC request messages (uncompressed)."
-
- // RPCClientResponseSize is the metric conforming to the
- // "rpc.client.response.size" semantic conventions. It represents the measures
- // the size of RPC response messages (uncompressed).
- // Instrument: histogram
- // Unit: By
- // Stability: Experimental
- RPCClientResponseSizeName = "rpc.client.response.size"
- RPCClientResponseSizeUnit = "By"
- RPCClientResponseSizeDescription = "Measures the size of RPC response messages (uncompressed)."
-
- // RPCClientRequestsPerRPC is the metric conforming to the
- // "rpc.client.requests_per_rpc" semantic conventions. It represents the
- // measures the number of messages received per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientRequestsPerRPCName = "rpc.client.requests_per_rpc"
- RPCClientRequestsPerRPCUnit = "{count}"
- RPCClientRequestsPerRPCDescription = "Measures the number of messages received per RPC."
-
- // RPCClientResponsesPerRPC is the metric conforming to the
- // "rpc.client.responses_per_rpc" semantic conventions. It represents the
- // measures the number of messages sent per RPC.
- // Instrument: histogram
- // Unit: {count}
- // Stability: Experimental
- RPCClientResponsesPerRPCName = "rpc.client.responses_per_rpc"
- RPCClientResponsesPerRPCUnit = "{count}"
- RPCClientResponsesPerRPCDescription = "Measures the number of messages sent per RPC."
-
- // SystemCPUTime is the metric conforming to the "system.cpu.time" semantic
- // conventions. It represents the seconds each logical CPU spent on each mode.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemCPUTimeName = "system.cpu.time"
- SystemCPUTimeUnit = "s"
- SystemCPUTimeDescription = "Seconds each logical CPU spent on each mode"
-
- // SystemCPUUtilization is the metric conforming to the
- // "system.cpu.utilization" semantic conventions. It represents the difference
- // in system.cpu.time since the last measurement, divided by the elapsed time
- // and number of logical CPUs.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- SystemCPUUtilizationName = "system.cpu.utilization"
- SystemCPUUtilizationUnit = "1"
- SystemCPUUtilizationDescription = "Difference in system.cpu.time since the last measurement, divided by the elapsed time and number of logical CPUs"
-
- // SystemCPUFrequency is the metric conforming to the "system.cpu.frequency"
- // semantic conventions. It represents the reports the current frequency of the
- // CPU in Hz.
- // Instrument: gauge
- // Unit: {Hz}
- // Stability: Experimental
- SystemCPUFrequencyName = "system.cpu.frequency"
- SystemCPUFrequencyUnit = "{Hz}"
- SystemCPUFrequencyDescription = "Reports the current frequency of the CPU in Hz"
-
- // SystemCPUPhysicalCount is the metric conforming to the
- // "system.cpu.physical.count" semantic conventions. It represents the reports
- // the number of actual physical processor cores on the hardware.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPUPhysicalCountName = "system.cpu.physical.count"
- SystemCPUPhysicalCountUnit = "{cpu}"
- SystemCPUPhysicalCountDescription = "Reports the number of actual physical processor cores on the hardware"
-
- // SystemCPULogicalCount is the metric conforming to the
- // "system.cpu.logical.count" semantic conventions. It represents the reports
- // the number of logical (virtual) processor cores created by the operating
- // system to manage multitasking.
- // Instrument: updowncounter
- // Unit: {cpu}
- // Stability: Experimental
- SystemCPULogicalCountName = "system.cpu.logical.count"
- SystemCPULogicalCountUnit = "{cpu}"
- SystemCPULogicalCountDescription = "Reports the number of logical (virtual) processor cores created by the operating system to manage multitasking"
-
- // SystemMemoryUsage is the metric conforming to the "system.memory.usage"
- // semantic conventions. It represents the reports memory in use by state.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryUsageName = "system.memory.usage"
- SystemMemoryUsageUnit = "By"
- SystemMemoryUsageDescription = "Reports memory in use by state."
-
- // SystemMemoryLimit is the metric conforming to the "system.memory.limit"
- // semantic conventions. It represents the total memory available in the
- // system.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemoryLimitName = "system.memory.limit"
- SystemMemoryLimitUnit = "By"
- SystemMemoryLimitDescription = "Total memory available in the system."
-
- // SystemMemoryShared is the metric conforming to the "system.memory.shared"
- // semantic conventions. It represents the shared memory used (mostly by
- // tmpfs).
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemMemorySharedName = "system.memory.shared"
- SystemMemorySharedUnit = "By"
- SystemMemorySharedDescription = "Shared memory used (mostly by tmpfs)."
-
- // SystemMemoryUtilization is the metric conforming to the
- // "system.memory.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemMemoryUtilizationName = "system.memory.utilization"
- SystemMemoryUtilizationUnit = "1"
-
- // SystemPagingUsage is the metric conforming to the "system.paging.usage"
- // semantic conventions. It represents the unix swap or windows pagefile usage.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemPagingUsageName = "system.paging.usage"
- SystemPagingUsageUnit = "By"
- SystemPagingUsageDescription = "Unix swap or windows pagefile usage"
-
- // SystemPagingUtilization is the metric conforming to the
- // "system.paging.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingUtilizationName = "system.paging.utilization"
- SystemPagingUtilizationUnit = "1"
-
- // SystemPagingFaults is the metric conforming to the "system.paging.faults"
- // semantic conventions.
- // Instrument: counter
- // Unit: {fault}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingFaultsName = "system.paging.faults"
- SystemPagingFaultsUnit = "{fault}"
-
- // SystemPagingOperations is the metric conforming to the
- // "system.paging.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemPagingOperationsName = "system.paging.operations"
- SystemPagingOperationsUnit = "{operation}"
-
- // SystemDiskIo is the metric conforming to the "system.disk.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskIoName = "system.disk.io"
- SystemDiskIoUnit = "By"
-
- // SystemDiskOperations is the metric conforming to the
- // "system.disk.operations" semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskOperationsName = "system.disk.operations"
- SystemDiskOperationsUnit = "{operation}"
-
- // SystemDiskIoTime is the metric conforming to the "system.disk.io_time"
- // semantic conventions. It represents the time disk spent activated.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskIoTimeName = "system.disk.io_time"
- SystemDiskIoTimeUnit = "s"
- SystemDiskIoTimeDescription = "Time disk spent activated"
-
- // SystemDiskOperationTime is the metric conforming to the
- // "system.disk.operation_time" semantic conventions. It represents the sum of
- // the time each operation took to complete.
- // Instrument: counter
- // Unit: s
- // Stability: Experimental
- SystemDiskOperationTimeName = "system.disk.operation_time"
- SystemDiskOperationTimeUnit = "s"
- SystemDiskOperationTimeDescription = "Sum of the time each operation took to complete"
-
- // SystemDiskMerged is the metric conforming to the "system.disk.merged"
- // semantic conventions.
- // Instrument: counter
- // Unit: {operation}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemDiskMergedName = "system.disk.merged"
- SystemDiskMergedUnit = "{operation}"
-
- // SystemFilesystemUsage is the metric conforming to the
- // "system.filesystem.usage" semantic conventions.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUsageName = "system.filesystem.usage"
- SystemFilesystemUsageUnit = "By"
-
- // SystemFilesystemUtilization is the metric conforming to the
- // "system.filesystem.utilization" semantic conventions.
- // Instrument: gauge
- // Unit: 1
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemFilesystemUtilizationName = "system.filesystem.utilization"
- SystemFilesystemUtilizationUnit = "1"
-
- // SystemNetworkDropped is the metric conforming to the
- // "system.network.dropped" semantic conventions. It represents the count of
- // packets that are dropped or discarded even though there was no error.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- SystemNetworkDroppedName = "system.network.dropped"
- SystemNetworkDroppedUnit = "{packet}"
- SystemNetworkDroppedDescription = "Count of packets that are dropped or discarded even though there was no error"
-
- // SystemNetworkPackets is the metric conforming to the
- // "system.network.packets" semantic conventions.
- // Instrument: counter
- // Unit: {packet}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkPacketsName = "system.network.packets"
- SystemNetworkPacketsUnit = "{packet}"
-
- // SystemNetworkErrors is the metric conforming to the "system.network.errors"
- // semantic conventions. It represents the count of network errors detected.
- // Instrument: counter
- // Unit: {error}
- // Stability: Experimental
- SystemNetworkErrorsName = "system.network.errors"
- SystemNetworkErrorsUnit = "{error}"
- SystemNetworkErrorsDescription = "Count of network errors detected"
-
- // SystemNetworkIo is the metric conforming to the "system.network.io" semantic
- // conventions.
- // Instrument: counter
- // Unit: By
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkIoName = "system.network.io"
- SystemNetworkIoUnit = "By"
-
- // SystemNetworkConnections is the metric conforming to the
- // "system.network.connections" semantic conventions.
- // Instrument: updowncounter
- // Unit: {connection}
- // Stability: Experimental
- // NOTE: The description (brief) for this metric is not defined in the semantic-conventions repository.
- SystemNetworkConnectionsName = "system.network.connections"
- SystemNetworkConnectionsUnit = "{connection}"
-
- // SystemProcessCount is the metric conforming to the "system.process.count"
- // semantic conventions. It represents the total number of processes in each
- // state.
- // Instrument: updowncounter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCountName = "system.process.count"
- SystemProcessCountUnit = "{process}"
- SystemProcessCountDescription = "Total number of processes in each state"
-
- // SystemProcessCreated is the metric conforming to the
- // "system.process.created" semantic conventions. It represents the total
- // number of processes created over uptime of the host.
- // Instrument: counter
- // Unit: {process}
- // Stability: Experimental
- SystemProcessCreatedName = "system.process.created"
- SystemProcessCreatedUnit = "{process}"
- SystemProcessCreatedDescription = "Total number of processes created over uptime of the host"
-
- // SystemLinuxMemoryAvailable is the metric conforming to the
- // "system.linux.memory.available" semantic conventions. It represents an
- // estimate of how much memory is available for starting new applications,
- // without causing swapping.
- // Instrument: updowncounter
- // Unit: By
- // Stability: Experimental
- SystemLinuxMemoryAvailableName = "system.linux.memory.available"
- SystemLinuxMemoryAvailableUnit = "By"
- SystemLinuxMemoryAvailableDescription = "An estimate of how much memory is available for starting new applications, without causing swapping"
-)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
index 5b5666257..98c0fddaa 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/attribute_group.go
@@ -3467,6 +3467,13 @@ func ContainerImageTags(val ...string) attribute.KeyValue {
return ContainerImageTagsKey.StringSlice(val)
}
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions. It represents the container labels,
+// "container.label" semantic conventions. It represents the container labels,
+// `<key>` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("container.label."+key, val)
+}
+
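
The helpers added in this and the following hunks (`container.label`, `db.*.parameter`, `http.*.header`, `k8s.*` labels/annotations, `rpc.*` metadata) all follow the semconv "template attribute" pattern: the caller supplies the dynamic key, and the helper appends it to the fixed attribute-name prefix. A minimal sketch of how such a helper could be used; the container and label values are illustrative only:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	// ContainerLabel("app", "netobserv") yields the attribute
	// "container.label.app" = "netobserv": the dynamic key is appended
	// to the "container.label." prefix at call time.
	attrs := []attribute.KeyValue{
		semconv.ContainerName("flowlogs-pipeline"), // fixed-name attribute (illustrative value)
		semconv.ContainerLabel("app", "netobserv"), // template attribute (illustrative value)
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```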
// ContainerName returns an attribute KeyValue conforming to the "container.name"
// semantic conventions. It represents the container name used by container
// runtime.
@@ -3794,6 +3801,22 @@ func DBOperationName(val string) attribute.KeyValue {
return DBOperationNameKey.String(val)
}
+// DBOperationParameter returns an attribute KeyValue conforming to the
+// "db.operation.parameter" semantic conventions. It represents a database
+// operation parameter, with `<key>` being the parameter name, and the attribute
+// value being a string representation of the parameter value.
+func DBOperationParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.operation.parameter."+key, val)
+}
+
+// DBQueryParameter returns an attribute KeyValue conforming to the
+// "db.query.parameter" semantic conventions. It represents a database query
+// parameter, with `<key>` being the parameter name, and the attribute value
+// being a string representation of the parameter value.
+func DBQueryParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.query.parameter."+key, val)
+}
+
// DBQuerySummary returns an attribute KeyValue conforming to the
// "db.query.summary" semantic conventions. It represents the low cardinality
// summary of a database query.
@@ -7312,6 +7335,14 @@ func HTTPRequestBodySize(val int) attribute.KeyValue {
return HTTPRequestBodySizeKey.Int(val)
}
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.request.header."+key, val)
+}
+
// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
// "http.request.method_original" semantic conventions. It represents the
// original HTTP method sent by the client in the request line.
@@ -7347,6 +7378,14 @@ func HTTPResponseBodySize(val int) attribute.KeyValue {
return HTTPResponseBodySizeKey.Int(val)
}
+// HTTPResponseHeader returns an attribute KeyValue conforming to the
+// "http.response.header" semantic conventions. It represents the HTTP response
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPResponseHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.response.header."+key, val)
+}
+
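
Unlike the single-valued template helpers above, the header helpers (and the `rpc.*` metadata helpers further down) take a variadic value list and produce a string-slice attribute, since one header or metadata key can carry several values. A short sketch with hypothetical header values:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	// The helpers expect the normalized (lowercase) header name and build
	// string-slice attributes such as "http.request.header.accept".
	attrs := []attribute.KeyValue{
		semconv.HTTPRequestHeader("accept", "application/json", "text/plain"),
		semconv.HTTPResponseHeader("content-type", "application/json"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```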
// HTTPResponseSize returns an attribute KeyValue conforming to the
// "http.response.size" semantic conventions. It represents the total size of the
// response in bytes. This should be the total number of bytes sent over the
@@ -8001,6 +8040,22 @@ func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
return K8SContainerStatusLastTerminatedReasonKey.String(val)
}
+// K8SCronJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob
+// annotation placed on the CronJob, the `<key>` being the annotation name, the
+// value being the annotation value.
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.annotation."+key, val)
+}
+
+// K8SCronJobLabel returns an attribute KeyValue conforming to the
+// "k8s.cronjob.label" semantic conventions. It represents the label placed on
+// the CronJob, the `<key>` being the label name, the value being the label
+// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.label."+key, val)
+}
+
// K8SCronJobName returns an attribute KeyValue conforming to the
// "k8s.cronjob.name" semantic conventions. It represents the name of the
// CronJob.
@@ -8014,6 +8069,20 @@ func K8SCronJobUID(val string) attribute.KeyValue {
return K8SCronJobUIDKey.String(val)
}
+// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.daemonset.annotation" semantic conventions. It represents the annotation
+// key-value pairs placed on the DaemonSet.
+func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.annotation."+key, val)
+}
+
+// K8SDaemonSetLabel returns an attribute KeyValue conforming to the
+// "k8s.daemonset.label" semantic conventions. It represents the label key-value
+// pairs placed on the DaemonSet.
+func K8SDaemonSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.label."+key, val)
+}
+
// K8SDaemonSetName returns an attribute KeyValue conforming to the
// "k8s.daemonset.name" semantic conventions. It represents the name of the
// DaemonSet.
@@ -8028,6 +8097,20 @@ func K8SDaemonSetUID(val string) attribute.KeyValue {
return K8SDaemonSetUIDKey.String(val)
}
+// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the
+// "k8s.deployment.annotation" semantic conventions. It represents the annotation
+// key-value pairs placed on the Deployment.
+func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.annotation."+key, val)
+}
+
+// K8SDeploymentLabel returns an attribute KeyValue conforming to the
+// "k8s.deployment.label" semantic conventions. It represents the label key-value
+// pairs placed on the Deployment.
+func K8SDeploymentLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.label."+key, val)
+}
+
// K8SDeploymentName returns an attribute KeyValue conforming to the
// "k8s.deployment.name" semantic conventions. It represents the name of the
// Deployment.
@@ -8054,6 +8137,20 @@ func K8SHPAUID(val string) attribute.KeyValue {
return K8SHPAUIDKey.String(val)
}
+// K8SJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.job.annotation" semantic conventions. It represents the annotation
+// key-value pairs placed on the Job.
+func K8SJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.annotation."+key, val)
+}
+
+// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label"
+// semantic conventions. It represents the label key-value pairs placed on the
+// Job.
+func K8SJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.label."+key, val)
+}
+
// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
// semantic conventions. It represents the name of the Job.
func K8SJobName(val string) attribute.KeyValue {
@@ -8066,6 +8163,20 @@ func K8SJobUID(val string) attribute.KeyValue {
return K8SJobUIDKey.String(val)
}
+// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the
+// "k8s.namespace.annotation" semantic conventions. It represents the annotation
+// key-value pairs placed on the Namespace.
+func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.annotation."+key, val)
+}
+
+// K8SNamespaceLabel returns an attribute KeyValue conforming to the
+// "k8s.namespace.label" semantic conventions. It represents the label key-value
+// pairs placed on the Namespace.
+func K8SNamespaceLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.label."+key, val)
+}
+
// K8SNamespaceName returns an attribute KeyValue conforming to the
// "k8s.namespace.name" semantic conventions. It represents the name of the
// namespace that the pod is running in.
@@ -8073,6 +8184,22 @@ func K8SNamespaceName(val string) attribute.KeyValue {
return K8SNamespaceNameKey.String(val)
}
+// K8SNodeAnnotation returns an attribute KeyValue conforming to the
+// "k8s.node.annotation" semantic conventions. It represents the annotation
+// placed on the Node, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.annotation."+key, val)
+}
+
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.label."+key, val)
+}
+
// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
// semantic conventions. It represents the name of the Node.
func K8SNodeName(val string) attribute.KeyValue {
@@ -8085,6 +8212,21 @@ func K8SNodeUID(val string) attribute.KeyValue {
return K8SNodeUIDKey.String(val)
}
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `<key>` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the `<key>`
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.label."+key, val)
+}
+
// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
// semantic conventions. It represents the name of the Pod.
func K8SPodName(val string) attribute.KeyValue {
@@ -8097,6 +8239,20 @@ func K8SPodUID(val string) attribute.KeyValue {
return K8SPodUIDKey.String(val)
}
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// key-value pairs placed on the ReplicaSet.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label key-value
+// pairs placed on the ReplicaSet.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.label."+key, val)
+}
+
// K8SReplicaSetName returns an attribute KeyValue conforming to the
// "k8s.replicaset.name" semantic conventions. It represents the name of the
// ReplicaSet.
@@ -8139,6 +8295,20 @@ func K8SResourceQuotaUID(val string) attribute.KeyValue {
return K8SResourceQuotaUIDKey.String(val)
}
+// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.statefulset.annotation" semantic conventions. It represents the
+// annotation key-value pairs placed on the StatefulSet.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.annotation."+key, val)
+}
+
+// K8SStatefulSetLabel returns an attribute KeyValue conforming to the
+// "k8s.statefulset.label" semantic conventions. It represents the label
+// key-value pairs placed on the StatefulSet.
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.label."+key, val)
+}
+
// K8SStatefulSetName returns an attribute KeyValue conforming to the
// "k8s.statefulset.name" semantic conventions. It represents the name of the
// StatefulSet.
@@ -10497,6 +10667,14 @@ func ProcessCreationTime(val string) attribute.KeyValue {
return ProcessCreationTimeKey.String(val)
}
+// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the
+// "process.environment_variable" semantic conventions. It represents the process
+// environment variables, `<key>` being the environment variable name, the value
+// being the environment variable value.
+func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue {
+ return attribute.String("process.environment_variable."+key, val)
+}
+
// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
@@ -10965,6 +11143,38 @@ const (
RPCSystemKey = attribute.Key("rpc.system")
)
+// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the
+// connect request metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val)
+}
+
+// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the
+// connect response metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val)
+}
+
+// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC
+// request metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.request.metadata."+key, val)
+}
+
+// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC
+// response metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.response.metadata."+key, val)
+}
+
// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
// property of response if it is an error response.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go
new file mode 100644
index 000000000..19bf02246
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.34.0/error_type.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.34.0"
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+func ErrorType(err error) attribute.KeyValue {
+ if err == nil {
+ return ErrorTypeOther
+ }
+ t := reflect.TypeOf(err)
+ var value string
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ value = t.String()
+ } else {
+ value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+ }
+
+ if value == "" {
+ return ErrorTypeOther
+ }
+ return ErrorTypeKey.String(value)
+}
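
The new `ErrorType` helper maps a Go error value to the `error.type` attribute, using the fully qualified type name when one exists and otherwise falling back to `reflect`'s string form or to the `_OTHER` sentinel. A minimal sketch of its behavior; the file path is illustrative only:

```go
package main

import (
	"fmt"
	"os"

	semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
)

func main() {
	// os.Open on a missing path returns a *fs.PathError. Pointer types carry
	// no package path or type name, so ErrorType falls back to reflect's
	// String(), yielding "error.type"="*fs.PathError".
	_, err := os.Open("/definitely/missing") // illustrative path
	kv := semconv.ErrorType(err)
	fmt.Printf("%s=%s\n", kv.Key, kv.Value.AsString())

	// A nil error maps to the ErrorTypeOther sentinel ("_OTHER").
	kv = semconv.ErrorType(nil)
	fmt.Printf("%s=%s\n", kv.Key, kv.Value.AsString())
}
```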
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
new file mode 100644
index 000000000..248054789
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/MIGRATION.md
@@ -0,0 +1,41 @@
+
+# Migration from v1.36.0 to v1.37.0
+
+The `go.opentelemetry.io/otel/semconv/v1.37.0` package should be a drop-in replacement for `go.opentelemetry.io/otel/semconv/v1.36.0` with the following exceptions.
+
+## Removed
+
+The following declarations have been removed.
+Refer to the [OpenTelemetry Semantic Conventions documentation] for deprecation instructions.
+
+If the type is not listed in the documentation as deprecated, it has been removed in this version due to lack of applicability or use.
+If you use any of these non-deprecated declarations in your Go application, please [open an issue] describing your use-case.
+
+- `ContainerRuntime`
+- `ContainerRuntimeKey`
+- `GenAIOpenAIRequestServiceTierAuto`
+- `GenAIOpenAIRequestServiceTierDefault`
+- `GenAIOpenAIRequestServiceTierKey`
+- `GenAIOpenAIResponseServiceTier`
+- `GenAIOpenAIResponseServiceTierKey`
+- `GenAIOpenAIResponseSystemFingerprint`
+- `GenAIOpenAIResponseSystemFingerprintKey`
+- `GenAISystemAWSBedrock`
+- `GenAISystemAnthropic`
+- `GenAISystemAzureAIInference`
+- `GenAISystemAzureAIOpenAI`
+- `GenAISystemCohere`
+- `GenAISystemDeepseek`
+- `GenAISystemGCPGemini`
+- `GenAISystemGCPGenAI`
+- `GenAISystemGCPVertexAI`
+- `GenAISystemGroq`
+- `GenAISystemIBMWatsonxAI`
+- `GenAISystemKey`
+- `GenAISystemMistralAI`
+- `GenAISystemOpenAI`
+- `GenAISystemPerplexity`
+- `GenAISystemXai`
+
+[OpenTelemetry Semantic Conventions documentation]: https://github.com/open-telemetry/semantic-conventions
+[open an issue]: https://github.com/open-telemetry/opentelemetry-go/issues/new?template=Blank+issue
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
new file mode 100644
index 000000000..d795247f3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/README.md
@@ -0,0 +1,3 @@
+# Semconv v1.37.0
+
+[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/semconv/v1.37.0)](https://pkg.go.dev/go.opentelemetry.io/otel/semconv/v1.37.0)
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
new file mode 100644
index 000000000..b6b27498f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/attribute_group.go
@@ -0,0 +1,15193 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// Namespace: android
+const (
+ // AndroidAppStateKey is the attribute Key conforming to the "android.app.state"
+ // semantic conventions. It represents the this attribute represents the state
+ // of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "created"
+ // Note: The Android lifecycle states are defined in
+ // [Activity lifecycle callbacks], and from which the `OS identifiers` are
+ // derived.
+ //
+ // [Activity lifecycle callbacks]: https://developer.android.com/guide/components/activities/activity-lifecycle#lc
+ AndroidAppStateKey = attribute.Key("android.app.state")
+
+ // AndroidOSAPILevelKey is the attribute Key conforming to the
+ // "android.os.api_level" semantic conventions. It represents the uniquely
+ // identifies the framework API revision offered by a version (`os.version`) of
+ // the android operating system. More information can be found in the
+ // [Android API levels documentation].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "33", "32"
+ //
+ // [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+ AndroidOSAPILevelKey = attribute.Key("android.os.api_level")
+)
+
+// AndroidOSAPILevel returns an attribute KeyValue conforming to the
+// "android.os.api_level" semantic conventions. It represents the uniquely
+// identifies the framework API revision offered by a version (`os.version`) of
+// the android operating system. More information can be found in the
+// [Android API levels documentation].
+//
+// [Android API levels documentation]: https://developer.android.com/guide/topics/manifest/uses-sdk-element#ApiLevels
+func AndroidOSAPILevel(val string) attribute.KeyValue {
+ return AndroidOSAPILevelKey.String(val)
+}
+
+// Enum values for android.app.state
+var (
+ // Any time before Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called in the app for the first time.
+ //
+ // Stability: development
+ AndroidAppStateCreated = AndroidAppStateKey.String("created")
+ // Any time after Activity.onPause() or, if the app has no Activity,
+ // Context.stopService() has been called when the app was in the foreground
+ // state.
+ //
+ // Stability: development
+ AndroidAppStateBackground = AndroidAppStateKey.String("background")
+ // Any time after Activity.onResume() or, if the app has no Activity,
+ // Context.startService() has been called when the app was in either the created
+ // or background states.
+ //
+ // Stability: development
+ AndroidAppStateForeground = AndroidAppStateKey.String("foreground")
+)
+
+// Namespace: app
+const (
+ // AppBuildIDKey is the attribute Key conforming to the "app.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6cff0a7e-cefc-4668-96f5-1273d8b334d0",
+ // "9f2b833506aa6973a92fde9733e6271f", "my-app-1.0.0-code-123"
+ AppBuildIDKey = attribute.Key("app.build_id")
+
+ // AppInstallationIDKey is the attribute Key conforming to the
+ // "app.installation.id" semantic conventions. It represents a unique identifier
+ // representing the installation of an application on a specific device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2ab2916d-a51f-4ac8-80ee-45ac31a28092"
+ // Note: Its value SHOULD persist across launches of the same application
+ // installation, including through application upgrades.
+ // It SHOULD change if the application is uninstalled or if all applications of
+ // the vendor are uninstalled.
+ // Additionally, users might be able to reset this value (e.g. by clearing
+ // application data).
+ // If an app is installed multiple times on the same device (e.g. in different
+ // accounts on Android), each `app.installation.id` SHOULD have a different
+ // value.
+ // If multiple OpenTelemetry SDKs are used within the same application, they
+ // SHOULD use the same value for `app.installation.id`.
+ // Hardware IDs (e.g. serial number, IMEI, MAC address) MUST NOT be used as the
+ // `app.installation.id`.
+ //
+ // For iOS, this value SHOULD be equal to the [vendor identifier].
+ //
+ // For Android, examples of `app.installation.id` implementations include:
+ //
+ // - [Firebase Installation ID].
+ // - A globally unique UUID which is persisted across sessions in your
+ // application.
+ // - [App set ID].
+ // - [`Settings.getString(Settings.Secure.ANDROID_ID)`].
+ //
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
+ //
+ // [vendor identifier]: https://developer.apple.com/documentation/uikit/uidevice/identifierforvendor
+ // [Firebase Installation ID]: https://firebase.google.com/docs/projects/manage-installations
+ // [App set ID]: https://developer.android.com/identity/app-set-id
+ // [`Settings.getString(Settings.Secure.ANDROID_ID)`]: https://developer.android.com/reference/android/provider/Settings.Secure#ANDROID_ID
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
+ AppInstallationIDKey = attribute.Key("app.installation.id")
+
+ // AppJankFrameCountKey is the attribute Key conforming to the
+ // "app.jank.frame_count" semantic conventions. It represents a number of frame
+ // renders that experienced jank.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 9, 42
+ // Note: Depending on platform limitations, the value provided MAY be
+ // approximation.
+ AppJankFrameCountKey = attribute.Key("app.jank.frame_count")
+
+ // AppJankPeriodKey is the attribute Key conforming to the "app.jank.period"
+ // semantic conventions. It represents the time period, in seconds, for which
+ // this jank is being reported.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 5.0, 10.24
+ AppJankPeriodKey = attribute.Key("app.jank.period")
+
+ // AppJankThresholdKey is the attribute Key conforming to the
+ // "app.jank.threshold" semantic conventions. It represents the minimum
+ // rendering threshold for this jank, in seconds.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.016, 0.7, 1.024
+ AppJankThresholdKey = attribute.Key("app.jank.threshold")
+
+ // AppScreenCoordinateXKey is the attribute Key conforming to the
+ // "app.screen.coordinate.x" semantic conventions. It represents the x
+ // (horizontal) coordinate of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 131
+ AppScreenCoordinateXKey = attribute.Key("app.screen.coordinate.x")
+
+ // AppScreenCoordinateYKey is the attribute Key conforming to the
+ // "app.screen.coordinate.y" semantic conventions. It represents the y
+ // (vertical) component of a screen coordinate, in screen pixels.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12, 99
+ AppScreenCoordinateYKey = attribute.Key("app.screen.coordinate.y")
+
+ // AppWidgetIDKey is the attribute Key conforming to the "app.widget.id"
+ // semantic conventions. It represents an identifier that uniquely
+ // differentiates this widget from other widgets in the same application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "f9bc787d-ff05-48ad-90e1-fca1d46130b3", "submit_order_1829"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetIDKey = attribute.Key("app.widget.id")
+
+ // AppWidgetNameKey is the attribute Key conforming to the "app.widget.name"
+ // semantic conventions. It represents the name of an application widget.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "submit", "attack", "Clear Cart"
+ // Note: A widget is an application component, typically an on-screen visual GUI
+ // element.
+ AppWidgetNameKey = attribute.Key("app.widget.name")
+)
+
+// AppBuildID returns an attribute KeyValue conforming to the "app.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the application.
+func AppBuildID(val string) attribute.KeyValue {
+ return AppBuildIDKey.String(val)
+}
+
+// AppInstallationID returns an attribute KeyValue conforming to the
+// "app.installation.id" semantic conventions. It represents a unique identifier
+// representing the installation of an application on a specific device.
+func AppInstallationID(val string) attribute.KeyValue {
+ return AppInstallationIDKey.String(val)
+}
+
+// AppJankFrameCount returns an attribute KeyValue conforming to the
+// "app.jank.frame_count" semantic conventions. It represents a number of frame
+// renders that experienced jank.
+func AppJankFrameCount(val int) attribute.KeyValue {
+ return AppJankFrameCountKey.Int(val)
+}
+
+// AppJankPeriod returns an attribute KeyValue conforming to the
+// "app.jank.period" semantic conventions. It represents the time period, in
+// seconds, for which this jank is being reported.
+func AppJankPeriod(val float64) attribute.KeyValue {
+ return AppJankPeriodKey.Float64(val)
+}
+
+// AppJankThreshold returns an attribute KeyValue conforming to the
+// "app.jank.threshold" semantic conventions. It represents the minimum rendering
+// threshold for this jank, in seconds.
+func AppJankThreshold(val float64) attribute.KeyValue {
+ return AppJankThresholdKey.Float64(val)
+}
+
+// AppScreenCoordinateX returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.x" semantic conventions. It represents the x
+// (horizontal) coordinate of a screen coordinate, in screen pixels.
+func AppScreenCoordinateX(val int) attribute.KeyValue {
+ return AppScreenCoordinateXKey.Int(val)
+}
+
+// AppScreenCoordinateY returns an attribute KeyValue conforming to the
+// "app.screen.coordinate.y" semantic conventions. It represents the y (vertical)
+// component of a screen coordinate, in screen pixels.
+func AppScreenCoordinateY(val int) attribute.KeyValue {
+ return AppScreenCoordinateYKey.Int(val)
+}
+
+// AppWidgetID returns an attribute KeyValue conforming to the "app.widget.id"
+// semantic conventions. It represents an identifier that uniquely differentiates
+// this widget from other widgets in the same application.
+func AppWidgetID(val string) attribute.KeyValue {
+ return AppWidgetIDKey.String(val)
+}
+
+// AppWidgetName returns an attribute KeyValue conforming to the
+// "app.widget.name" semantic conventions. It represents the name of an
+// application widget.
+func AppWidgetName(val string) attribute.KeyValue {
+ return AppWidgetNameKey.String(val)
+}
+
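
The `app.*` constructors are plain value-typed helpers (string, int, double) rather than template attributes. A short sketch of attributes that could describe a jank report for a UI widget, with all values illustrative; the import path assumes the vendored v1.37.0 package added by this change:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func main() {
	// Hypothetical jank report for one widget over a 5 s window.
	attrs := []attribute.KeyValue{
		semconv.AppWidgetName("submit"),
		semconv.AppJankPeriod(5.0),      // seconds covered by the report
		semconv.AppJankThreshold(0.016), // 16 ms frame budget
		semconv.AppJankFrameCount(9),    // frames that exceeded the threshold
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```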
+// Namespace: artifact
+const (
+ // ArtifactAttestationFilenameKey is the attribute Key conforming to the
+ // "artifact.attestation.filename" semantic conventions. It represents the
+ // provenance filename of the built attestation which directly relates to the
+ // build artifact filename. This filename SHOULD accompany the artifact at
+ // publish time. See the [SLSA Relationship] specification for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0.attestation",
+ // "docker-image-amd64-v0.1.0.intoto.json1", "release-1.tar.gz.attestation",
+ // "file-name-package.tar.gz.intoto.json1"
+ //
+ // [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+ ArtifactAttestationFilenameKey = attribute.Key("artifact.attestation.filename")
+
+ // ArtifactAttestationHashKey is the attribute Key conforming to the
+ // "artifact.attestation.hash" semantic conventions. It represents the full
+ // [hash value (see glossary)], of the built attestation. Some envelopes in the
+ // [software attestation space] also refer to this as the **digest**.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1b31dfcd5b7f9267bf2ff47651df1cfb9147b9e4df1f335accf65b4cda498408"
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+ ArtifactAttestationHashKey = attribute.Key("artifact.attestation.hash")
+
+ // ArtifactAttestationIDKey is the attribute Key conforming to the
+ // "artifact.attestation.id" semantic conventions. It represents the id of the
+ // build [software attestation].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ //
+ // [software attestation]: https://slsa.dev/attestation-model
+ ArtifactAttestationIDKey = attribute.Key("artifact.attestation.id")
+
+ // ArtifactFilenameKey is the attribute Key conforming to the
+ // "artifact.filename" semantic conventions. It represents the human readable
+ // file name of the artifact, typically generated during build and release
+ // processes. Often includes the package name and version in the file name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "golang-binary-amd64-v0.1.0", "docker-image-amd64-v0.1.0",
+ // "release-1.tar.gz", "file-name-package.tar.gz"
+ // Note: This file name can also act as the [Package Name]
+ // in cases where the package ecosystem maps accordingly.
+ // Additionally, the artifact [can be published]
+ // for others, but that is not a guarantee.
+ //
+ // [Package Name]: https://slsa.dev/spec/v1.0/terminology#package-model
+ // [can be published]: https://slsa.dev/spec/v1.0/terminology#software-supply-chain
+ ArtifactFilenameKey = attribute.Key("artifact.filename")
+
+ // ArtifactHashKey is the attribute Key conforming to the "artifact.hash"
+ // semantic conventions. It represents the full [hash value (see glossary)],
+ // often found in checksum.txt on a release of the artifact and used to verify
+ // package integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"
+ // Note: The specific algorithm used to create the cryptographic hash value is
+ // not defined. In situations where an artifact has multiple
+ // cryptographic hashes, it is up to the implementer to choose which
+ // hash value to set here; this should be the most secure hash algorithm
+ // that is suitable for the situation and consistent with the
+ // corresponding attestation. The implementer can then provide the other
+ // hash values through an additional set of attribute extensions as they
+ // deem necessary.
+ //
+ // [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ ArtifactHashKey = attribute.Key("artifact.hash")
+
+ // ArtifactPurlKey is the attribute Key conforming to the "artifact.purl"
+ // semantic conventions. It represents the [Package URL] of the
+ // [package artifact] provides a standard way to identify and locate the
+ // packaged artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pkg:github/package-url/purl-spec@1209109710924",
+ // "pkg:npm/foo@12.12.3"
+ //
+ // [Package URL]: https://github.com/package-url/purl-spec
+ // [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+ ArtifactPurlKey = attribute.Key("artifact.purl")
+
+ // ArtifactVersionKey is the attribute Key conforming to the "artifact.version"
+ // semantic conventions. It represents the version of the artifact.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v0.1.0", "1.2.1", "122691-build"
+ ArtifactVersionKey = attribute.Key("artifact.version")
+)
+
+// ArtifactAttestationFilename returns an attribute KeyValue conforming to the
+// "artifact.attestation.filename" semantic conventions. It represents the
+// provenance filename of the built attestation which directly relates to the
+// build artifact filename. This filename SHOULD accompany the artifact at
+// publish time. See the [SLSA Relationship] specification for more information.
+//
+// [SLSA Relationship]: https://slsa.dev/spec/v1.0/distributing-provenance#relationship-between-artifacts-and-attestations
+func ArtifactAttestationFilename(val string) attribute.KeyValue {
+ return ArtifactAttestationFilenameKey.String(val)
+}
+
+// ArtifactAttestationHash returns an attribute KeyValue conforming to the
+// "artifact.attestation.hash" semantic conventions. It represents the full
+// [hash value (see glossary)], of the built attestation. Some envelopes in the
+// [software attestation space] also refer to this as the **digest**.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+// [software attestation space]: https://github.com/in-toto/attestation/tree/main/spec
+func ArtifactAttestationHash(val string) attribute.KeyValue {
+ return ArtifactAttestationHashKey.String(val)
+}
+
+// ArtifactAttestationID returns an attribute KeyValue conforming to the
+// "artifact.attestation.id" semantic conventions. It represents the id of the
+// build [software attestation].
+//
+// [software attestation]: https://slsa.dev/attestation-model
+func ArtifactAttestationID(val string) attribute.KeyValue {
+ return ArtifactAttestationIDKey.String(val)
+}
+
+// ArtifactFilename returns an attribute KeyValue conforming to the
+// "artifact.filename" semantic conventions. It represents the human readable
+// file name of the artifact, typically generated during build and release
+// processes. Often includes the package name and version in the file name.
+func ArtifactFilename(val string) attribute.KeyValue {
+ return ArtifactFilenameKey.String(val)
+}
+
+// ArtifactHash returns an attribute KeyValue conforming to the "artifact.hash"
+// semantic conventions. It represents the full [hash value (see glossary)],
+// often found in checksum.txt on a release of the artifact and used to verify
+// package integrity.
+//
+// [hash value (see glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+func ArtifactHash(val string) attribute.KeyValue {
+ return ArtifactHashKey.String(val)
+}
+
+// ArtifactPurl returns an attribute KeyValue conforming to the "artifact.purl"
+// semantic conventions. It represents the [Package URL] of the
+// [package artifact] provides a standard way to identify and locate the packaged
+// artifact.
+//
+// [Package URL]: https://github.com/package-url/purl-spec
+// [package artifact]: https://slsa.dev/spec/v1.0/terminology#package-model
+func ArtifactPurl(val string) attribute.KeyValue {
+ return ArtifactPurlKey.String(val)
+}
+
+// ArtifactVersion returns an attribute KeyValue conforming to the
+// "artifact.version" semantic conventions. It represents the version of the
+// artifact.
+func ArtifactVersion(val string) attribute.KeyValue {
+ return ArtifactVersionKey.String(val)
+}
+
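
In the same way, the `artifact.*` constructors can be combined to describe a released build artifact, for example on a CI/CD span. A brief sketch reusing the example values from the attribute documentation above:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func main() {
	attrs := []attribute.KeyValue{
		semconv.ArtifactFilename("release-1.tar.gz"),
		semconv.ArtifactVersion("v0.1.0"),
		semconv.ArtifactHash("9ff4c52759e2c4ac70b7d517bc7fcdc1cda631ca0045271ddd1b192544f8a3e9"),
		semconv.ArtifactPurl("pkg:npm/foo@12.12.3"),
	}
	for _, kv := range attrs {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit())
	}
}
```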
+// Namespace: aws
+const (
+ // AWSBedrockGuardrailIDKey is the attribute Key conforming to the
+ // "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+ // identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+ // prevent unwanted behavior from model responses or user messages.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "sgi5gkybzqak"
+ //
+ // [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+ AWSBedrockGuardrailIDKey = attribute.Key("aws.bedrock.guardrail.id")
+
+ // AWSBedrockKnowledgeBaseIDKey is the attribute Key conforming to the
+ // "aws.bedrock.knowledge_base.id" semantic conventions. It represents the
+ // unique identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a
+ // bank of information that can be queried by models to generate more relevant
+ // responses and augment prompts.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "XFWUPB9PAW"
+ //
+ // [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+ AWSBedrockKnowledgeBaseIDKey = attribute.Key("aws.bedrock.knowledge_base.id")
+
+ // AWSDynamoDBAttributeDefinitionsKey is the attribute Key conforming to the
+ // "aws.dynamodb.attribute_definitions" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `AttributeDefinitions` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "AttributeName": "string", "AttributeType": "string" }"
+ AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions")
+
+ // AWSDynamoDBAttributesToGetKey is the attribute Key conforming to the
+ // "aws.dynamodb.attributes_to_get" semantic conventions. It represents the
+ // value of the `AttributesToGet` request parameter.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lives", "id"
+ AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get")
+
+ // AWSDynamoDBConsistentReadKey is the attribute Key conforming to the
+ // "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+ // of the `ConsistentRead` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read")
+
+ // AWSDynamoDBConsumedCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+ // JSON-serialized value of each item in the `ConsumedCapacity` response field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" :
+ // { "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } },
+ // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number,
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName":
+ // "string", "WriteCapacityUnits": number }"
+ AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity")
+
+ // AWSDynamoDBCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.count" semantic conventions. It represents the value of the
+ // `Count` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count")
+
+ // AWSDynamoDBExclusiveStartTableKey is the attribute Key conforming to the
+ // "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+ // value of the `ExclusiveStartTableName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "CatsTable"
+ AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table")
+
+ // AWSDynamoDBGlobalSecondaryIndexUpdatesKey is the attribute Key conforming to
+ // the "aws.dynamodb.global_secondary_index_updates" semantic conventions. It
+ // represents the JSON-serialized value of each item in the
+ // `GlobalSecondaryIndexUpdates` request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "Create": { "IndexName": "string", "KeySchema": [ {
+ // "AttributeName": "string", "KeyType": "string" } ], "Projection": {
+ // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" },
+ // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits":
+ // number } }"
+ AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates")
+
+ // AWSDynamoDBGlobalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.global_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `GlobalSecondaryIndexes`
+ // request field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexName": "string", "KeySchema": [ { "AttributeName":
+ // "string", "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [
+ // "string" ], "ProjectionType": "string" }, "ProvisionedThroughput": {
+ // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }"
+ AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes")
+
+ // AWSDynamoDBIndexNameKey is the attribute Key conforming to the
+ // "aws.dynamodb.index_name" semantic conventions. It represents the value of
+ // the `IndexName` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "name_to_group"
+ AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name")
+
+ // AWSDynamoDBItemCollectionMetricsKey is the attribute Key conforming to the
+ // "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+ // the JSON-serialized value of the `ItemCollectionMetrics` response field.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob,
+ // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" :
+ // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S":
+ // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }"
+ AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics")
+
+ // AWSDynamoDBLimitKey is the attribute Key conforming to the
+ // "aws.dynamodb.limit" semantic conventions. It represents the value of the
+ // `Limit` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit")
+
+ // AWSDynamoDBLocalSecondaryIndexesKey is the attribute Key conforming to the
+ // "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+ // the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+ // field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "{ "IndexArn": "string", "IndexName": "string", "IndexSizeBytes":
+ // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string",
+ // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ],
+ // "ProjectionType": "string" } }"
+ AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes")
+
+ // AWSDynamoDBProjectionKey is the attribute Key conforming to the
+ // "aws.dynamodb.projection" semantic conventions. It represents the value of
+ // the `ProjectionExpression` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Title", "Title, Price, Color", "Title, Description, RelatedItems,
+ // ProductReviews"
+ AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection")
+
+ // AWSDynamoDBProvisionedReadCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_read_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity")
+
+ // AWSDynamoDBProvisionedWriteCapacityKey is the attribute Key conforming to the
+ // "aws.dynamodb.provisioned_write_capacity" semantic conventions. It represents
+ // the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+ // parameter.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0, 2.0
+ AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity")
+
+ // AWSDynamoDBScanForwardKey is the attribute Key conforming to the
+ // "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+ // the `ScanIndexForward` request parameter.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward")
+
+ // AWSDynamoDBScannedCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+ // the `ScannedCount` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 50
+ AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count")
+
+ // AWSDynamoDBSegmentKey is the attribute Key conforming to the
+ // "aws.dynamodb.segment" semantic conventions. It represents the value of the
+ // `Segment` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10
+ AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment")
+
+ // AWSDynamoDBSelectKey is the attribute Key conforming to the
+ // "aws.dynamodb.select" semantic conventions. It represents the value of the
+ // `Select` request parameter.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ALL_ATTRIBUTES", "COUNT"
+ AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select")
+
+ // AWSDynamoDBTableCountKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_count" semantic conventions. It represents the number of
+ // items in the `TableNames` response parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 20
+ AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count")
+
+ // AWSDynamoDBTableNamesKey is the attribute Key conforming to the
+ // "aws.dynamodb.table_names" semantic conventions. It represents the keys in
+ // the `RequestItems` object field.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Users", "Cats"
+ AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names")
+
+ // AWSDynamoDBTotalSegmentsKey is the attribute Key conforming to the
+ // "aws.dynamodb.total_segments" semantic conventions. It represents the value
+ // of the `TotalSegments` request parameter.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments")
+
+ // AWSECSClusterARNKey is the attribute Key conforming to the
+ // "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+ // [ECS cluster].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ //
+ // [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+ AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn")
+
+ // AWSECSContainerARNKey is the attribute Key conforming to the
+ // "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+ // Resource Name (ARN) of an [ECS container instance].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9"
+ //
+ // [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+ AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn")
+
+ // AWSECSLaunchtypeKey is the attribute Key conforming to the
+ // "aws.ecs.launchtype" semantic conventions. It represents the [launch type]
+ // for an ECS task.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [launch type]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/launch_types.html
+ AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype")
+
+ // AWSECSTaskARNKey is the attribute Key conforming to the "aws.ecs.task.arn"
+ // semantic conventions. It represents the ARN of a running [ECS task].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:ecs:us-west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "arn:aws:ecs:us-west-1:123456789123:task/my-cluster/task-id/23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ //
+ // [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+ AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn")
+
+ // AWSECSTaskFamilyKey is the attribute Key conforming to the
+ // "aws.ecs.task.family" semantic conventions. It represents the family name of
+ // the [ECS task definition] used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-family"
+ //
+ // [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+ AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family")
+
+ // AWSECSTaskIDKey is the attribute Key conforming to the "aws.ecs.task.id"
+ // semantic conventions. It represents the ID of a running ECS task. The ID MUST
+ // be extracted from `task.arn`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10838bed-421f-43ef-870a-f43feacbbb5b",
+ // "23ebb8ac-c18f-46c6-8bbe-d55d0e37cfbd"
+ AWSECSTaskIDKey = attribute.Key("aws.ecs.task.id")
+
+ // AWSECSTaskRevisionKey is the attribute Key conforming to the
+ // "aws.ecs.task.revision" semantic conventions. It represents the revision for
+ // the task definition used to create the ECS task.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "8", "26"
+ AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision")
+
+ // AWSEKSClusterARNKey is the attribute Key conforming to the
+ // "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+ // cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster"
+ AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn")
+
+ // AWSExtendedRequestIDKey is the attribute Key conforming to the
+ // "aws.extended_request_id" semantic conventions. It represents the AWS
+ // extended request ID as returned in the response header `x-amz-id-2`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "wzHcyEWfmOGDIE5QOhTAqFDoDWP3y8IUvpNINCwL9N4TEHbUw0/gZJ+VZTmCNCWR7fezEN3eCiQ="
+ AWSExtendedRequestIDKey = attribute.Key("aws.extended_request_id")
+
+ // AWSKinesisStreamNameKey is the attribute Key conforming to the
+ // "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+ // AWS Kinesis [stream] the request refers to. Corresponds to the
+ // `--stream-name` parameter of the Kinesis [describe-stream] operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-stream-name"
+ //
+ // [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+ // [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+ AWSKinesisStreamNameKey = attribute.Key("aws.kinesis.stream_name")
+
+ // AWSLambdaInvokedARNKey is the attribute Key conforming to the
+ // "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+ // ARN as provided on the `Context` passed to the function (
+ // `Lambda-Runtime-Invoked-Function-Arn` header on the
+ // `/runtime/invocation/next` endpoint, as applicable).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:us-east-1:123456:function:myfunction:myalias"
+ // Note: This may be different from `cloud.resource_id` if an alias is involved.
+ AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn")
+
+ // AWSLambdaResourceMappingIDKey is the attribute Key conforming to the
+ // "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+ // of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+ // Lambda function. Its contents are read by Lambda and used to trigger a
+ // function. This isn't available in the Lambda execution context or the Lambda
+ // runtime environment. It is populated by the AWS SDK for each language when
+ // that UUID is present. Some of these operations are
+ // Create/Delete/Get/List/Update EventSourceMapping.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "587ad24b-03b9-4413-8202-bbd56b36e5b7"
+ //
+ // [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+ AWSLambdaResourceMappingIDKey = attribute.Key("aws.lambda.resource_mapping.id")
+
+ // AWSLogGroupARNsKey is the attribute Key conforming to the
+ // "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+ // Name(s) (ARN) of the AWS log group(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*"
+ // Note: See the [log group ARN format documentation].
+ //
+ // [log group ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns")
+
+ // AWSLogGroupNamesKey is the attribute Key conforming to the
+ // "aws.log.group.names" semantic conventions. It represents the name(s) of the
+ // AWS log group(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/aws/lambda/my-function", "opentelemetry-service"
+ // Note: Multiple log groups must be supported for cases like multi-container
+ // applications, where a single application has sidecar containers, and each
+ // writes to its own log group.
+ AWSLogGroupNamesKey = attribute.Key("aws.log.group.names")
+
+ // AWSLogStreamARNsKey is the attribute Key conforming to the
+ // "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+ // AWS log stream(s).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log-stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ // Note: See the [log stream ARN format documentation]. One log group can
+ // contain several log streams, so these ARNs necessarily identify both a log
+ // group and a log stream.
+ //
+ // [log stream ARN format documentation]: https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam-access-control-overview-cwl.html#CWL_ARN_Format
+ AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns")
+
+ // AWSLogStreamNamesKey is the attribute Key conforming to the
+ // "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+ // AWS log stream(s) an application is writing to.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "logs/main/10838bed-421f-43ef-870a-f43feacbbb5b"
+ AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names")
+
+ // AWSRequestIDKey is the attribute Key conforming to the "aws.request_id"
+ // semantic conventions. It represents the AWS request ID as returned in the
+ // response headers `x-amzn-requestid`, `x-amzn-request-id` or
+ // `x-amz-request-id`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "79b9da39-b7ae-508a-a6bc-864b2829c622", "C9ER4AJX75574TDJ"
+ AWSRequestIDKey = attribute.Key("aws.request_id")
+
+ // AWSS3BucketKey is the attribute Key conforming to the "aws.s3.bucket"
+ // semantic conventions. It represents the S3 bucket name the request refers to.
+ // Corresponds to the `--bucket` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "some-bucket-name"
+ // Note: The `bucket` attribute is applicable to all S3 operations that
+ // reference a bucket, i.e. that require the bucket name as a mandatory
+ // parameter.
+ // This applies to almost all S3 operations except `list-buckets`.
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ AWSS3BucketKey = attribute.Key("aws.s3.bucket")
+
+ // AWSS3CopySourceKey is the attribute Key conforming to the
+ // "aws.s3.copy_source" semantic conventions. It represents the source object
+ // (in the form `bucket`/`key`) for the copy operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `copy_source` attribute applies to S3 copy operations and
+ // corresponds to the `--copy-source` parameter
+ // of the [copy-object operation within the S3 API].
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [upload-part-copy]
+ //
+ //
+ // [copy-object operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3CopySourceKey = attribute.Key("aws.s3.copy_source")
+
+ // AWSS3DeleteKey is the attribute Key conforming to the "aws.s3.delete"
+ // semantic conventions. It represents the delete request container that
+ // specifies the objects to be deleted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "Objects=[{Key=string,VersionId=string},{Key=string,VersionId=string}],Quiet=boolean"
+ // Note: The `delete` attribute is only applicable to the [delete-object]
+ // operation.
+ // The `delete` attribute corresponds to the `--delete` parameter of the
+ // [delete-objects operation within the S3 API].
+ //
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [delete-objects operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-objects.html
+ AWSS3DeleteKey = attribute.Key("aws.s3.delete")
+
+ // AWSS3KeyKey is the attribute Key conforming to the "aws.s3.key" semantic
+ // conventions. It represents the S3 object key the request refers to.
+ // Corresponds to the `--key` parameter of the [S3 API] operations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "someFile.yml"
+ // Note: The `key` attribute is applicable to all object-related S3 operations,
+ // i.e. that require the object key as a mandatory parameter.
+ // This applies in particular to the following operations:
+ //
+ // - [copy-object]
+ // - [delete-object]
+ // - [get-object]
+ // - [head-object]
+ // - [put-object]
+ // - [restore-object]
+ // - [select-object-content]
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [create-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [copy-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/copy-object.html
+ // [delete-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/delete-object.html
+ // [get-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/get-object.html
+ // [head-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/head-object.html
+ // [put-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/put-object.html
+ // [restore-object]: https://docs.aws.amazon.com/cli/latest/reference/s3api/restore-object.html
+ // [select-object-content]: https://docs.aws.amazon.com/cli/latest/reference/s3api/select-object-content.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [create-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/create-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3KeyKey = attribute.Key("aws.s3.key")
+
+ // AWSS3PartNumberKey is the attribute Key conforming to the
+ // "aws.s3.part_number" semantic conventions. It represents the part number of
+ // the part being uploaded in a multipart-upload operation. This is a positive
+ // integer between 1 and 10,000.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3456
+ // Note: The `part_number` attribute is only applicable to the [upload-part]
+ // and [upload-part-copy] operations.
+ // The `part_number` attribute corresponds to the `--part-number` parameter of
+ // the
+ // [upload-part operation within the S3 API].
+ //
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ // [upload-part operation within the S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ AWSS3PartNumberKey = attribute.Key("aws.s3.part_number")
+
+ // AWSS3UploadIDKey is the attribute Key conforming to the "aws.s3.upload_id"
+ // semantic conventions. It represents the upload ID that identifies the
+ // multipart upload.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dfRtDYWFbkRONycy.Yxwh66Yjlx.cph0gtNBtJ"
+ // Note: The `upload_id` attribute applies to S3 multipart-upload operations and
+ // corresponds to the `--upload-id` parameter
+ // of the [S3 API] multipart operations.
+ // This applies in particular to the following operations:
+ //
+ // - [abort-multipart-upload]
+ // - [complete-multipart-upload]
+ // - [list-parts]
+ // - [upload-part]
+ // - [upload-part-copy]
+ //
+ //
+ // [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+ // [abort-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/abort-multipart-upload.html
+ // [complete-multipart-upload]: https://docs.aws.amazon.com/cli/latest/reference/s3api/complete-multipart-upload.html
+ // [list-parts]: https://docs.aws.amazon.com/cli/latest/reference/s3api/list-parts.html
+ // [upload-part]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part.html
+ // [upload-part-copy]: https://docs.aws.amazon.com/cli/latest/reference/s3api/upload-part-copy.html
+ AWSS3UploadIDKey = attribute.Key("aws.s3.upload_id")
+
+ // AWSSecretsmanagerSecretARNKey is the attribute Key conforming to the
+ // "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN
+ // of the Secret stored in AWS Secrets Manager.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:secretsmanager:us-east-1:123456789012:secret:SecretName-6RandomCharacters"
+ AWSSecretsmanagerSecretARNKey = attribute.Key("aws.secretsmanager.secret.arn")
+
+ // AWSSNSTopicARNKey is the attribute Key conforming to the "aws.sns.topic.arn"
+ // semantic conventions. It represents the ARN of the AWS SNS Topic. An Amazon
+ // SNS [topic] is a logical access point that acts as a communication channel.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:sns:us-east-1:123456789012:mystack-mytopic-NZJ5JSMVGFIE"
+ //
+ // [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+ AWSSNSTopicARNKey = attribute.Key("aws.sns.topic.arn")
+
+ // AWSSQSQueueURLKey is the attribute Key conforming to the "aws.sqs.queue.url"
+ // semantic conventions. It represents the URL of the AWS SQS Queue. It's a
+ // unique identifier for a queue in Amazon Simple Queue Service (SQS) and is
+ // used to access the queue and perform actions on it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://sqs.us-east-1.amazonaws.com/123456789012/MyQueue"
+ AWSSQSQueueURLKey = attribute.Key("aws.sqs.queue.url")
+
+ // AWSStepFunctionsActivityARNKey is the attribute Key conforming to the
+ // "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+ // of the AWS Step Functions Activity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:states:us-east-1:123456789012:activity:get-greeting"
+ AWSStepFunctionsActivityARNKey = attribute.Key("aws.step_functions.activity.arn")
+
+ // AWSStepFunctionsStateMachineARNKey is the attribute Key conforming to the
+ // "aws.step_functions.state_machine.arn" semantic conventions. It represents
+ // the ARN of the AWS Step Functions State Machine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "arn:aws:states:us-east-1:123456789012:stateMachine:myStateMachine:1"
+ AWSStepFunctionsStateMachineARNKey = attribute.Key("aws.step_functions.state_machine.arn")
+)
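+
+// Illustrative note (not part of the generated conventions): each Key constant
+// above can also be used directly through the attribute.Key value methods,
+// which is equivalent to calling the typed constructors defined below. For
+// example, assuming this package is imported:
+//
+//	attr := AWSS3BucketKey.String("some-bucket-name") // same as AWSS3Bucket("some-bucket-name")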
+
+// AWSBedrockGuardrailID returns an attribute KeyValue conforming to the
+// "aws.bedrock.guardrail.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Guardrail. A [guardrail] helps safeguard and
+// prevent unwanted behavior from model responses or user messages.
+//
+// [guardrail]: https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html
+func AWSBedrockGuardrailID(val string) attribute.KeyValue {
+ return AWSBedrockGuardrailIDKey.String(val)
+}
+
+// AWSBedrockKnowledgeBaseID returns an attribute KeyValue conforming to the
+// "aws.bedrock.knowledge_base.id" semantic conventions. It represents the unique
+// identifier of the AWS Bedrock Knowledge base. A [knowledge base] is a bank of
+// information that can be queried by models to generate more relevant responses
+// and augment prompts.
+//
+// [knowledge base]: https://docs.aws.amazon.com/bedrock/latest/userguide/knowledge-base.html
+func AWSBedrockKnowledgeBaseID(val string) attribute.KeyValue {
+ return AWSBedrockKnowledgeBaseIDKey.String(val)
+}
+
+// AWSDynamoDBAttributeDefinitions returns an attribute KeyValue conforming to
+// the "aws.dynamodb.attribute_definitions" semantic conventions. It represents
+// the JSON-serialized value of each item in the `AttributeDefinitions` request
+// field.
+func AWSDynamoDBAttributeDefinitions(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributeDefinitionsKey.StringSlice(val)
+}
+
+// AWSDynamoDBAttributesToGet returns an attribute KeyValue conforming to the
+// "aws.dynamodb.attributes_to_get" semantic conventions. It represents the value
+// of the `AttributesToGet` request parameter.
+func AWSDynamoDBAttributesToGet(val ...string) attribute.KeyValue {
+ return AWSDynamoDBAttributesToGetKey.StringSlice(val)
+}
+
+// AWSDynamoDBConsistentRead returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consistent_read" semantic conventions. It represents the value
+// of the `ConsistentRead` request parameter.
+func AWSDynamoDBConsistentRead(val bool) attribute.KeyValue {
+ return AWSDynamoDBConsistentReadKey.Bool(val)
+}
+
+// AWSDynamoDBConsumedCapacity returns an attribute KeyValue conforming to the
+// "aws.dynamodb.consumed_capacity" semantic conventions. It represents the
+// JSON-serialized value of each item in the `ConsumedCapacity` response field.
+func AWSDynamoDBConsumedCapacity(val ...string) attribute.KeyValue {
+ return AWSDynamoDBConsumedCapacityKey.StringSlice(val)
+}
+
+// AWSDynamoDBCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.count" semantic conventions. It represents the value of the
+// `Count` response parameter.
+func AWSDynamoDBCount(val int) attribute.KeyValue {
+ return AWSDynamoDBCountKey.Int(val)
+}
+
+// AWSDynamoDBExclusiveStartTable returns an attribute KeyValue conforming to the
+// "aws.dynamodb.exclusive_start_table" semantic conventions. It represents the
+// value of the `ExclusiveStartTableName` request parameter.
+func AWSDynamoDBExclusiveStartTable(val string) attribute.KeyValue {
+ return AWSDynamoDBExclusiveStartTableKey.String(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexUpdates returns an attribute KeyValue
+// conforming to the "aws.dynamodb.global_secondary_index_updates" semantic
+// conventions. It represents the JSON-serialized value of each item in the
+// `GlobalSecondaryIndexUpdates` request field.
+func AWSDynamoDBGlobalSecondaryIndexUpdates(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexUpdatesKey.StringSlice(val)
+}
+
+// AWSDynamoDBGlobalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.global_secondary_indexes" semantic conventions. It
+// represents the JSON-serialized value of each item of the
+// `GlobalSecondaryIndexes` request field.
+func AWSDynamoDBGlobalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBGlobalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBIndexName returns an attribute KeyValue conforming to the
+// "aws.dynamodb.index_name" semantic conventions. It represents the value of the
+// `IndexName` request parameter.
+func AWSDynamoDBIndexName(val string) attribute.KeyValue {
+ return AWSDynamoDBIndexNameKey.String(val)
+}
+
+// AWSDynamoDBItemCollectionMetrics returns an attribute KeyValue conforming to
+// the "aws.dynamodb.item_collection_metrics" semantic conventions. It represents
+// the JSON-serialized value of the `ItemCollectionMetrics` response field.
+func AWSDynamoDBItemCollectionMetrics(val string) attribute.KeyValue {
+ return AWSDynamoDBItemCollectionMetricsKey.String(val)
+}
+
+// AWSDynamoDBLimit returns an attribute KeyValue conforming to the
+// "aws.dynamodb.limit" semantic conventions. It represents the value of the
+// `Limit` request parameter.
+func AWSDynamoDBLimit(val int) attribute.KeyValue {
+ return AWSDynamoDBLimitKey.Int(val)
+}
+
+// AWSDynamoDBLocalSecondaryIndexes returns an attribute KeyValue conforming to
+// the "aws.dynamodb.local_secondary_indexes" semantic conventions. It represents
+// the JSON-serialized value of each item of the `LocalSecondaryIndexes` request
+// field.
+func AWSDynamoDBLocalSecondaryIndexes(val ...string) attribute.KeyValue {
+ return AWSDynamoDBLocalSecondaryIndexesKey.StringSlice(val)
+}
+
+// AWSDynamoDBProjection returns an attribute KeyValue conforming to the
+// "aws.dynamodb.projection" semantic conventions. It represents the value of the
+// `ProjectionExpression` request parameter.
+func AWSDynamoDBProjection(val string) attribute.KeyValue {
+ return AWSDynamoDBProjectionKey.String(val)
+}
+
+// AWSDynamoDBProvisionedReadCapacity returns an attribute KeyValue conforming to
+// the "aws.dynamodb.provisioned_read_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.ReadCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedReadCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedReadCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBProvisionedWriteCapacity returns an attribute KeyValue conforming
+// to the "aws.dynamodb.provisioned_write_capacity" semantic conventions. It
+// represents the value of the `ProvisionedThroughput.WriteCapacityUnits` request
+// parameter.
+func AWSDynamoDBProvisionedWriteCapacity(val float64) attribute.KeyValue {
+ return AWSDynamoDBProvisionedWriteCapacityKey.Float64(val)
+}
+
+// AWSDynamoDBScanForward returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scan_forward" semantic conventions. It represents the value of
+// the `ScanIndexForward` request parameter.
+func AWSDynamoDBScanForward(val bool) attribute.KeyValue {
+ return AWSDynamoDBScanForwardKey.Bool(val)
+}
+
+// AWSDynamoDBScannedCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.scanned_count" semantic conventions. It represents the value of
+// the `ScannedCount` response parameter.
+func AWSDynamoDBScannedCount(val int) attribute.KeyValue {
+ return AWSDynamoDBScannedCountKey.Int(val)
+}
+
+// AWSDynamoDBSegment returns an attribute KeyValue conforming to the
+// "aws.dynamodb.segment" semantic conventions. It represents the value of the
+// `Segment` request parameter.
+func AWSDynamoDBSegment(val int) attribute.KeyValue {
+ return AWSDynamoDBSegmentKey.Int(val)
+}
+
+// AWSDynamoDBSelect returns an attribute KeyValue conforming to the
+// "aws.dynamodb.select" semantic conventions. It represents the value of the
+// `Select` request parameter.
+func AWSDynamoDBSelect(val string) attribute.KeyValue {
+ return AWSDynamoDBSelectKey.String(val)
+}
+
+// AWSDynamoDBTableCount returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_count" semantic conventions. It represents the number of
+// items in the `TableNames` response parameter.
+func AWSDynamoDBTableCount(val int) attribute.KeyValue {
+ return AWSDynamoDBTableCountKey.Int(val)
+}
+
+// AWSDynamoDBTableNames returns an attribute KeyValue conforming to the
+// "aws.dynamodb.table_names" semantic conventions. It represents the keys in the
+// `RequestItems` object field.
+func AWSDynamoDBTableNames(val ...string) attribute.KeyValue {
+ return AWSDynamoDBTableNamesKey.StringSlice(val)
+}
+
+// AWSDynamoDBTotalSegments returns an attribute KeyValue conforming to the
+// "aws.dynamodb.total_segments" semantic conventions. It represents the value of
+// the `TotalSegments` request parameter.
+func AWSDynamoDBTotalSegments(val int) attribute.KeyValue {
+ return AWSDynamoDBTotalSegmentsKey.Int(val)
+}
+
+// AWSECSClusterARN returns an attribute KeyValue conforming to the
+// "aws.ecs.cluster.arn" semantic conventions. It represents the ARN of an
+// [ECS cluster].
+//
+// [ECS cluster]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/clusters.html
+func AWSECSClusterARN(val string) attribute.KeyValue {
+ return AWSECSClusterARNKey.String(val)
+}
+
+// AWSECSContainerARN returns an attribute KeyValue conforming to the
+// "aws.ecs.container.arn" semantic conventions. It represents the Amazon
+// Resource Name (ARN) of an [ECS container instance].
+//
+// [ECS container instance]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ECS_instances.html
+func AWSECSContainerARN(val string) attribute.KeyValue {
+ return AWSECSContainerARNKey.String(val)
+}
+
+// AWSECSTaskARN returns an attribute KeyValue conforming to the
+// "aws.ecs.task.arn" semantic conventions. It represents the ARN of a running
+// [ECS task].
+//
+// [ECS task]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-account-settings.html#ecs-resource-ids
+func AWSECSTaskARN(val string) attribute.KeyValue {
+ return AWSECSTaskARNKey.String(val)
+}
+
+// AWSECSTaskFamily returns an attribute KeyValue conforming to the
+// "aws.ecs.task.family" semantic conventions. It represents the family name of
+// the [ECS task definition] used to create the ECS task.
+//
+// [ECS task definition]: https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_definitions.html
+func AWSECSTaskFamily(val string) attribute.KeyValue {
+ return AWSECSTaskFamilyKey.String(val)
+}
+
+// AWSECSTaskID returns an attribute KeyValue conforming to the "aws.ecs.task.id"
+// semantic conventions. It represents the ID of a running ECS task. The ID MUST
+// be extracted from `task.arn`.
+func AWSECSTaskID(val string) attribute.KeyValue {
+ return AWSECSTaskIDKey.String(val)
+}
+
+// AWSECSTaskRevision returns an attribute KeyValue conforming to the
+// "aws.ecs.task.revision" semantic conventions. It represents the revision for
+// the task definition used to create the ECS task.
+func AWSECSTaskRevision(val string) attribute.KeyValue {
+ return AWSECSTaskRevisionKey.String(val)
+}
+
+// AWSEKSClusterARN returns an attribute KeyValue conforming to the
+// "aws.eks.cluster.arn" semantic conventions. It represents the ARN of an EKS
+// cluster.
+func AWSEKSClusterARN(val string) attribute.KeyValue {
+ return AWSEKSClusterARNKey.String(val)
+}
+
+// AWSExtendedRequestID returns an attribute KeyValue conforming to the
+// "aws.extended_request_id" semantic conventions. It represents the AWS extended
+// request ID as returned in the response header `x-amz-id-2`.
+func AWSExtendedRequestID(val string) attribute.KeyValue {
+ return AWSExtendedRequestIDKey.String(val)
+}
+
+// AWSKinesisStreamName returns an attribute KeyValue conforming to the
+// "aws.kinesis.stream_name" semantic conventions. It represents the name of the
+// AWS Kinesis [stream] the request refers to. Corresponds to the `--stream-name`
+// parameter of the Kinesis [describe-stream] operation.
+//
+// [stream]: https://docs.aws.amazon.com/streams/latest/dev/introduction.html
+// [describe-stream]: https://docs.aws.amazon.com/cli/latest/reference/kinesis/describe-stream.html
+func AWSKinesisStreamName(val string) attribute.KeyValue {
+ return AWSKinesisStreamNameKey.String(val)
+}
+
+// AWSLambdaInvokedARN returns an attribute KeyValue conforming to the
+// "aws.lambda.invoked_arn" semantic conventions. It represents the full invoked
+// ARN as provided on the `Context` passed to the function (
+// `Lambda-Runtime-Invoked-Function-Arn` header on the `/runtime/invocation/next`
+// endpoint, as applicable).
+func AWSLambdaInvokedARN(val string) attribute.KeyValue {
+ return AWSLambdaInvokedARNKey.String(val)
+}
+
+// AWSLambdaResourceMappingID returns an attribute KeyValue conforming to the
+// "aws.lambda.resource_mapping.id" semantic conventions. It represents the UUID
+// of the [AWS Lambda EventSource Mapping]. An event source is mapped to a
+// Lambda function. Its contents are read by Lambda and used to trigger a
+// function. This isn't available in the Lambda execution context or the Lambda
+// runtime environment. It is populated by the AWS SDK for each language when
+// that UUID is present. Some of these operations are
+// Create/Delete/Get/List/Update EventSourceMapping.
+//
+// [AWS Lambda EventSource Mapping]: https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-lambda-eventsourcemapping.html
+func AWSLambdaResourceMappingID(val string) attribute.KeyValue {
+ return AWSLambdaResourceMappingIDKey.String(val)
+}
+
+// AWSLogGroupARNs returns an attribute KeyValue conforming to the
+// "aws.log.group.arns" semantic conventions. It represents the Amazon Resource
+// Name(s) (ARN) of the AWS log group(s).
+func AWSLogGroupARNs(val ...string) attribute.KeyValue {
+ return AWSLogGroupARNsKey.StringSlice(val)
+}
+
+// AWSLogGroupNames returns an attribute KeyValue conforming to the
+// "aws.log.group.names" semantic conventions. It represents the name(s) of the
+// AWS log group(s) an application is writing to.
+func AWSLogGroupNames(val ...string) attribute.KeyValue {
+ return AWSLogGroupNamesKey.StringSlice(val)
+}
+
+// AWSLogStreamARNs returns an attribute KeyValue conforming to the
+// "aws.log.stream.arns" semantic conventions. It represents the ARN(s) of the
+// AWS log stream(s).
+func AWSLogStreamARNs(val ...string) attribute.KeyValue {
+ return AWSLogStreamARNsKey.StringSlice(val)
+}
+
+// AWSLogStreamNames returns an attribute KeyValue conforming to the
+// "aws.log.stream.names" semantic conventions. It represents the name(s) of the
+// AWS log stream(s) an application is writing to.
+func AWSLogStreamNames(val ...string) attribute.KeyValue {
+ return AWSLogStreamNamesKey.StringSlice(val)
+}
+
+// AWSRequestID returns an attribute KeyValue conforming to the "aws.request_id"
+// semantic conventions. It represents the AWS request ID as returned in the
+// response headers `x-amzn-requestid`, `x-amzn-request-id` or `x-amz-request-id`.
+func AWSRequestID(val string) attribute.KeyValue {
+ return AWSRequestIDKey.String(val)
+}
+
+// AWSS3Bucket returns an attribute KeyValue conforming to the "aws.s3.bucket"
+// semantic conventions. It represents the S3 bucket name the request refers to.
+// Corresponds to the `--bucket` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Bucket(val string) attribute.KeyValue {
+ return AWSS3BucketKey.String(val)
+}
+
+// AWSS3CopySource returns an attribute KeyValue conforming to the
+// "aws.s3.copy_source" semantic conventions. It represents the source object (in
+// the form `bucket`/`key`) for the copy operation.
+func AWSS3CopySource(val string) attribute.KeyValue {
+ return AWSS3CopySourceKey.String(val)
+}
+
+// AWSS3Delete returns an attribute KeyValue conforming to the "aws.s3.delete"
+// semantic conventions. It represents the delete request container that
+// specifies the objects to be deleted.
+func AWSS3Delete(val string) attribute.KeyValue {
+ return AWSS3DeleteKey.String(val)
+}
+
+// AWSS3Key returns an attribute KeyValue conforming to the "aws.s3.key" semantic
+// conventions. It represents the S3 object key the request refers to.
+// Corresponds to the `--key` parameter of the [S3 API] operations.
+//
+// [S3 API]: https://docs.aws.amazon.com/cli/latest/reference/s3api/index.html
+func AWSS3Key(val string) attribute.KeyValue {
+ return AWSS3KeyKey.String(val)
+}
+
+// AWSS3PartNumber returns an attribute KeyValue conforming to the
+// "aws.s3.part_number" semantic conventions. It represents the part number of
+// the part being uploaded in a multipart-upload operation. This is a positive
+// integer between 1 and 10,000.
+func AWSS3PartNumber(val int) attribute.KeyValue {
+ return AWSS3PartNumberKey.Int(val)
+}
+
+// AWSS3UploadID returns an attribute KeyValue conforming to the
+// "aws.s3.upload_id" semantic conventions. It represents the upload ID that
+// identifies the multipart upload.
+func AWSS3UploadID(val string) attribute.KeyValue {
+ return AWSS3UploadIDKey.String(val)
+}
+
+// AWSSecretsmanagerSecretARN returns an attribute KeyValue conforming to the
+// "aws.secretsmanager.secret.arn" semantic conventions. It represents the ARN of
+// the Secret stored in AWS Secrets Manager.
+func AWSSecretsmanagerSecretARN(val string) attribute.KeyValue {
+ return AWSSecretsmanagerSecretARNKey.String(val)
+}
+
+// AWSSNSTopicARN returns an attribute KeyValue conforming to the
+// "aws.sns.topic.arn" semantic conventions. It represents the ARN of the AWS SNS
+// Topic. An Amazon SNS [topic] is a logical access point that acts as a
+// communication channel.
+//
+// [topic]: https://docs.aws.amazon.com/sns/latest/dg/sns-create-topic.html
+func AWSSNSTopicARN(val string) attribute.KeyValue {
+ return AWSSNSTopicARNKey.String(val)
+}
+
+// AWSSQSQueueURL returns an attribute KeyValue conforming to the
+// "aws.sqs.queue.url" semantic conventions. It represents the URL of the AWS SQS
+// Queue. It's a unique identifier for a queue in Amazon Simple Queue Service
+// (SQS) and is used to access the queue and perform actions on it.
+func AWSSQSQueueURL(val string) attribute.KeyValue {
+ return AWSSQSQueueURLKey.String(val)
+}
+
+// AWSStepFunctionsActivityARN returns an attribute KeyValue conforming to the
+// "aws.step_functions.activity.arn" semantic conventions. It represents the ARN
+// of the AWS Step Functions Activity.
+func AWSStepFunctionsActivityARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsActivityARNKey.String(val)
+}
+
+// AWSStepFunctionsStateMachineARN returns an attribute KeyValue conforming to
+// the "aws.step_functions.state_machine.arn" semantic conventions. It represents
+// the ARN of the AWS Step Functions State Machine.
+func AWSStepFunctionsStateMachineARN(val string) attribute.KeyValue {
+ return AWSStepFunctionsStateMachineARNKey.String(val)
+}
+
+// Enum values for aws.ecs.launchtype
+var (
+ // Amazon EC2
+ // Stability: development
+ AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2")
+ // Amazon Fargate
+ // Stability: development
+ AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate")
+)
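+
+// Example (illustrative sketch, not part of the generated conventions): the
+// typed constructors above attach to spans like any other attribute. The
+// tracer name, span name, and values below are hypothetical, and the snippet
+// assumes go.opentelemetry.io/otel is imported as otel. Enum values such as
+// AWSECSLaunchtypeFargate are already attribute.KeyValue and can be passed the
+// same way.
+//
+//	ctx, span := otel.Tracer("example").Start(ctx, "DynamoDB.GetItem")
+//	defer span.End()
+//	span.SetAttributes(
+//		AWSDynamoDBTableNames("Users"),
+//		AWSDynamoDBConsistentRead(true),
+//		AWSRequestID("79b9da39-b7ae-508a-a6bc-864b2829c622"),
+//	)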
+
+// Namespace: azure
+const (
+ // AzureClientIDKey is the attribute Key conforming to the "azure.client.id"
+ // semantic conventions. It represents the unique identifier of the client
+ // instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "3ba4827d-4422-483f-b59f-85b74211c11d", "storage-client-1"
+ AzureClientIDKey = attribute.Key("azure.client.id")
+
+ // AzureCosmosDBConnectionModeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.connection.mode" semantic conventions. It represents the
+ // cosmos client connection mode.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBConnectionModeKey = attribute.Key("azure.cosmosdb.connection.mode")
+
+ // AzureCosmosDBConsistencyLevelKey is the attribute Key conforming to the
+ // "azure.cosmosdb.consistency.level" semantic conventions. It represents the
+ // account or request [consistency level].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Eventual", "ConsistentPrefix", "BoundedStaleness", "Strong",
+ // "Session"
+ //
+ // [consistency level]: https://learn.microsoft.com/azure/cosmos-db/consistency-levels
+ AzureCosmosDBConsistencyLevelKey = attribute.Key("azure.cosmosdb.consistency.level")
+
+ // AzureCosmosDBOperationContactedRegionsKey is the attribute Key conforming to
+ // the "azure.cosmosdb.operation.contacted_regions" semantic conventions. It
+ // represents the list of regions contacted during operation in the order that
+ // they were contacted. If there is more than one region listed, it indicates
+ // that the operation was performed on multiple regions i.e. cross-regional
+ // call.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "North Central US", "Australia East", "Australia Southeast"
+ // Note: Region name matches the format of `displayName` in [Azure Location API]
+ //
+ // [Azure Location API]: https://learn.microsoft.com/rest/api/subscription/subscriptions/list-locations?view=rest-subscription-2021-10-01&tabs=HTTP#location
+ AzureCosmosDBOperationContactedRegionsKey = attribute.Key("azure.cosmosdb.operation.contacted_regions")
+
+ // AzureCosmosDBOperationRequestChargeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.operation.request_charge" semantic conventions. It represents
+ // the number of request units consumed by the operation.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 46.18, 1.0
+ AzureCosmosDBOperationRequestChargeKey = attribute.Key("azure.cosmosdb.operation.request_charge")
+
+ // AzureCosmosDBRequestBodySizeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+ // request payload size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ AzureCosmosDBRequestBodySizeKey = attribute.Key("azure.cosmosdb.request.body.size")
+
+ // AzureCosmosDBResponseSubStatusCodeKey is the attribute Key conforming to the
+ // "azure.cosmosdb.response.sub_status_code" semantic conventions. It represents
+ // the cosmos DB sub status code.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000, 1002
+ AzureCosmosDBResponseSubStatusCodeKey = attribute.Key("azure.cosmosdb.response.sub_status_code")
+
+ // AzureResourceProviderNamespaceKey is the attribute Key conforming to the
+ // "azure.resource_provider.namespace" semantic conventions. It represents the
+ // [Azure Resource Provider Namespace] as recognized by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft.Storage", "Microsoft.KeyVault", "Microsoft.ServiceBus"
+ //
+ // [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+ AzureResourceProviderNamespaceKey = attribute.Key("azure.resource_provider.namespace")
+
+ // AzureServiceRequestIDKey is the attribute Key conforming to the
+ // "azure.service.request.id" semantic conventions. It represents the unique
+ // identifier of the service request. It's generated by the Azure service and
+ // returned with the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00000000-0000-0000-0000-000000000000"
+ AzureServiceRequestIDKey = attribute.Key("azure.service.request.id")
+)
+
+// AzureClientID returns an attribute KeyValue conforming to the
+// "azure.client.id" semantic conventions. It represents the unique identifier of
+// the client instance.
+func AzureClientID(val string) attribute.KeyValue {
+ return AzureClientIDKey.String(val)
+}
+
+// AzureCosmosDBOperationContactedRegions returns an attribute KeyValue
+// conforming to the "azure.cosmosdb.operation.contacted_regions" semantic
+// conventions. It represents the list of regions contacted during operation in
+// the order that they were contacted. If there is more than one region listed,
+// it indicates that the operation was performed on multiple regions i.e.
+// cross-regional call.
+func AzureCosmosDBOperationContactedRegions(val ...string) attribute.KeyValue {
+ return AzureCosmosDBOperationContactedRegionsKey.StringSlice(val)
+}
+
+// AzureCosmosDBOperationRequestCharge returns an attribute KeyValue conforming
+// to the "azure.cosmosdb.operation.request_charge" semantic conventions. It
+// represents the number of request units consumed by the operation.
+func AzureCosmosDBOperationRequestCharge(val float64) attribute.KeyValue {
+ return AzureCosmosDBOperationRequestChargeKey.Float64(val)
+}
+
+// AzureCosmosDBRequestBodySize returns an attribute KeyValue conforming to the
+// "azure.cosmosdb.request.body.size" semantic conventions. It represents the
+// request payload size in bytes.
+func AzureCosmosDBRequestBodySize(val int) attribute.KeyValue {
+ return AzureCosmosDBRequestBodySizeKey.Int(val)
+}
+
+// AzureCosmosDBResponseSubStatusCode returns an attribute KeyValue conforming to
+// the "azure.cosmosdb.response.sub_status_code" semantic conventions. It
+// represents the cosmos DB sub status code.
+func AzureCosmosDBResponseSubStatusCode(val int) attribute.KeyValue {
+ return AzureCosmosDBResponseSubStatusCodeKey.Int(val)
+}
+
+// AzureResourceProviderNamespace returns an attribute KeyValue conforming to the
+// "azure.resource_provider.namespace" semantic conventions. It represents the
+// [Azure Resource Provider Namespace] as recognized by the client.
+//
+// [Azure Resource Provider Namespace]: https://learn.microsoft.com/azure/azure-resource-manager/management/azure-services-resource-providers
+func AzureResourceProviderNamespace(val string) attribute.KeyValue {
+ return AzureResourceProviderNamespaceKey.String(val)
+}
+
+// AzureServiceRequestID returns an attribute KeyValue conforming to the
+// "azure.service.request.id" semantic conventions. It represents the unique
+// identifier of the service request. It's generated by the Azure service and
+// returned with the response.
+func AzureServiceRequestID(val string) attribute.KeyValue {
+ return AzureServiceRequestIDKey.String(val)
+}
+
+// Enum values for azure.cosmosdb.connection.mode
+var (
+ // Gateway (HTTP) connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeGateway = AzureCosmosDBConnectionModeKey.String("gateway")
+ // Direct connection.
+ // Stability: development
+ AzureCosmosDBConnectionModeDirect = AzureCosmosDBConnectionModeKey.String("direct")
+)
+
+// Enum values for azure.cosmosdb.consistency.level
+var (
+ // Strong
+ // Stability: development
+ AzureCosmosDBConsistencyLevelStrong = AzureCosmosDBConsistencyLevelKey.String("Strong")
+ // Bounded Staleness
+ // Stability: development
+ AzureCosmosDBConsistencyLevelBoundedStaleness = AzureCosmosDBConsistencyLevelKey.String("BoundedStaleness")
+ // Session
+ // Stability: development
+ AzureCosmosDBConsistencyLevelSession = AzureCosmosDBConsistencyLevelKey.String("Session")
+ // Eventual
+ // Stability: development
+ AzureCosmosDBConsistencyLevelEventual = AzureCosmosDBConsistencyLevelKey.String("Eventual")
+ // Consistent Prefix
+ // Stability: development
+ AzureCosmosDBConsistencyLevelConsistentPrefix = AzureCosmosDBConsistencyLevelKey.String("ConsistentPrefix")
+)
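+
+// Example (illustrative sketch; the span variable and values are hypothetical):
+// Cosmos DB attributes and enum values combine the same way, for instance on a
+// span that records a query.
+//
+//	span.SetAttributes(
+//		AzureCosmosDBConnectionModeGateway,
+//		AzureCosmosDBConsistencyLevelSession,
+//		AzureCosmosDBOperationRequestCharge(46.18),
+//	)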
+
+// Namespace: browser
+const (
+ // BrowserBrandsKey is the attribute Key conforming to the "browser.brands"
+ // semantic conventions. It represents the array of brand name and version
+ // separated by a space.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: " Not A;Brand 99", "Chromium 99", "Chrome 99"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.brands`).
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserBrandsKey = attribute.Key("browser.brands")
+
+ // BrowserLanguageKey is the attribute Key conforming to the "browser.language"
+ // semantic conventions. It represents the preferred language of the user using
+ // the browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "en", "en-US", "fr", "fr-FR"
+ // Note: This value is intended to be taken from the Navigator API
+ // `navigator.language`.
+ BrowserLanguageKey = attribute.Key("browser.language")
+
+ // BrowserMobileKey is the attribute Key conforming to the "browser.mobile"
+ // semantic conventions. It represents a boolean that is true if the browser is
+ // running on a mobile device.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.mobile`). If unavailable, this attribute SHOULD be
+ // left unset.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ BrowserMobileKey = attribute.Key("browser.mobile")
+
+ // BrowserPlatformKey is the attribute Key conforming to the "browser.platform"
+ // semantic conventions. It represents the platform on which the browser is
+ // running.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Windows", "macOS", "Android"
+ // Note: This value is intended to be taken from the [UA client hints API] (
+ // `navigator.userAgentData.platform`). If unavailable, the legacy
+ // `navigator.platform` API SHOULD NOT be used instead and this attribute SHOULD
+ // be left unset in order for the values to be consistent.
+ // The list of possible values is defined in the
+ // [W3C User-Agent Client Hints specification]. Note that some (but not all) of
+ // these values can overlap with values in the
+ // [`os.type` and `os.name` attributes]. However, for consistency, the values in
+ // the `browser.platform` attribute should capture the exact value that the user
+ // agent provides.
+ //
+ // [UA client hints API]: https://wicg.github.io/ua-client-hints/#interface
+ // [W3C User-Agent Client Hints specification]: https://wicg.github.io/ua-client-hints/#sec-ch-ua-platform
+ // [`os.type` and `os.name` attributes]: ./os.md
+ BrowserPlatformKey = attribute.Key("browser.platform")
+)
+
+// BrowserBrands returns an attribute KeyValue conforming to the "browser.brands"
+// semantic conventions. It represents the array of brand name and version
+// separated by a space.
+func BrowserBrands(val ...string) attribute.KeyValue {
+ return BrowserBrandsKey.StringSlice(val)
+}
+
+// BrowserLanguage returns an attribute KeyValue conforming to the
+// "browser.language" semantic conventions. It represents the preferred language
+// of the user using the browser.
+func BrowserLanguage(val string) attribute.KeyValue {
+ return BrowserLanguageKey.String(val)
+}
+
+// BrowserMobile returns an attribute KeyValue conforming to the "browser.mobile"
+// semantic conventions. It represents a boolean that is true if the browser is
+// running on a mobile device.
+func BrowserMobile(val bool) attribute.KeyValue {
+ return BrowserMobileKey.Bool(val)
+}
+
+// BrowserPlatform returns an attribute KeyValue conforming to the
+// "browser.platform" semantic conventions. It represents the platform on which
+// the browser is running.
+func BrowserPlatform(val string) attribute.KeyValue {
+ return BrowserPlatformKey.String(val)
+}
+
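+// Usage sketch (illustrative only): the browser attributes above are typically
+// derived from the UA client hints API and reported together. BrowserBrands is
+// variadic and produces a string-slice attribute. The literal values below are
+// assumptions for the example, not values mandated by the conventions:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.BrowserBrands("Chromium 99", "Chrome 99"),
+//		semconv.BrowserLanguage("en-US"),
+//		semconv.BrowserMobile(false),
+//		semconv.BrowserPlatform("macOS"),
+//	}
+//	span.SetAttributes(attrs...)
+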
+// Namespace: cassandra
+const (
+ // CassandraConsistencyLevelKey is the attribute Key conforming to the
+ // "cassandra.consistency.level" semantic conventions. It represents the
+ // consistency level of the query. Based on consistency values from [CQL].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [CQL]: https://docs.datastax.com/en/cassandra-oss/3.0/cassandra/dml/dmlConfigConsistency.html
+ CassandraConsistencyLevelKey = attribute.Key("cassandra.consistency.level")
+
+ // CassandraCoordinatorDCKey is the attribute Key conforming to the
+ // "cassandra.coordinator.dc" semantic conventions. It represents the data
+ // center of the coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: us-west-2
+ CassandraCoordinatorDCKey = attribute.Key("cassandra.coordinator.dc")
+
+ // CassandraCoordinatorIDKey is the attribute Key conforming to the
+ // "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+ // coordinating node for a query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: be13faa2-8574-4d71-926d-27f16cf8a7af
+ CassandraCoordinatorIDKey = attribute.Key("cassandra.coordinator.id")
+
+ // CassandraPageSizeKey is the attribute Key conforming to the
+ // "cassandra.page.size" semantic conventions. It represents the fetch size used
+ // for paging, i.e. how many rows will be returned at once.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 5000
+ CassandraPageSizeKey = attribute.Key("cassandra.page.size")
+
+ // CassandraQueryIdempotentKey is the attribute Key conforming to the
+	// "cassandra.query.idempotent" semantic conventions. It represents whether
+	// or not the query is idempotent.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CassandraQueryIdempotentKey = attribute.Key("cassandra.query.idempotent")
+
+ // CassandraSpeculativeExecutionCountKey is the attribute Key conforming to the
+ // "cassandra.speculative_execution.count" semantic conventions. It represents
+ // the number of times a query was speculatively executed. Not set or `0` if the
+ // query was not executed speculatively.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 2
+ CassandraSpeculativeExecutionCountKey = attribute.Key("cassandra.speculative_execution.count")
+)
+
+// CassandraCoordinatorDC returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.dc" semantic conventions. It represents the data center
+// of the coordinating node for a query.
+func CassandraCoordinatorDC(val string) attribute.KeyValue {
+ return CassandraCoordinatorDCKey.String(val)
+}
+
+// CassandraCoordinatorID returns an attribute KeyValue conforming to the
+// "cassandra.coordinator.id" semantic conventions. It represents the ID of the
+// coordinating node for a query.
+func CassandraCoordinatorID(val string) attribute.KeyValue {
+ return CassandraCoordinatorIDKey.String(val)
+}
+
+// CassandraPageSize returns an attribute KeyValue conforming to the
+// "cassandra.page.size" semantic conventions. It represents the fetch size used
+// for paging, i.e. how many rows will be returned at once.
+func CassandraPageSize(val int) attribute.KeyValue {
+ return CassandraPageSizeKey.Int(val)
+}
+
+// CassandraQueryIdempotent returns an attribute KeyValue conforming to the
+// "cassandra.query.idempotent" semantic conventions. It represents the whether
+// or not the query is idempotent.
+func CassandraQueryIdempotent(val bool) attribute.KeyValue {
+ return CassandraQueryIdempotentKey.Bool(val)
+}
+
+// CassandraSpeculativeExecutionCount returns an attribute KeyValue conforming to
+// the "cassandra.speculative_execution.count" semantic conventions. It
+// represents the number of times a query was speculatively executed. Not set or
+// `0` if the query was not executed speculatively.
+func CassandraSpeculativeExecutionCount(val int) attribute.KeyValue {
+ return CassandraSpeculativeExecutionCountKey.Int(val)
+}
+
+// Enum values for cassandra.consistency.level
+var (
+ // All
+ // Stability: development
+ CassandraConsistencyLevelAll = CassandraConsistencyLevelKey.String("all")
+ // Each Quorum
+ // Stability: development
+ CassandraConsistencyLevelEachQuorum = CassandraConsistencyLevelKey.String("each_quorum")
+ // Quorum
+ // Stability: development
+ CassandraConsistencyLevelQuorum = CassandraConsistencyLevelKey.String("quorum")
+ // Local Quorum
+ // Stability: development
+ CassandraConsistencyLevelLocalQuorum = CassandraConsistencyLevelKey.String("local_quorum")
+ // One
+ // Stability: development
+ CassandraConsistencyLevelOne = CassandraConsistencyLevelKey.String("one")
+ // Two
+ // Stability: development
+ CassandraConsistencyLevelTwo = CassandraConsistencyLevelKey.String("two")
+ // Three
+ // Stability: development
+ CassandraConsistencyLevelThree = CassandraConsistencyLevelKey.String("three")
+ // Local One
+ // Stability: development
+ CassandraConsistencyLevelLocalOne = CassandraConsistencyLevelKey.String("local_one")
+ // Any
+ // Stability: development
+ CassandraConsistencyLevelAny = CassandraConsistencyLevelKey.String("any")
+ // Serial
+ // Stability: development
+ CassandraConsistencyLevelSerial = CassandraConsistencyLevelKey.String("serial")
+ // Local Serial
+ // Stability: development
+ CassandraConsistencyLevelLocalSerial = CassandraConsistencyLevelKey.String("local_serial")
+)
+
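+// Usage sketch (illustrative only): Cassandra attributes can also be supplied
+// when the span is started, via trace.WithAttributes. The tracer variable, span
+// name, and literal values below are assumptions for the example:
+//
+//	ctx, span := tracer.Start(ctx, "SELECT my_keyspace.users",
+//		trace.WithAttributes(
+//			semconv.CassandraConsistencyLevelLocalQuorum,
+//			semconv.CassandraCoordinatorDC("us-west-2"),
+//			semconv.CassandraPageSize(5000),
+//			semconv.CassandraQueryIdempotent(true),
+//		))
+//	defer span.End()
+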
+// Namespace: cicd
+const (
+ // CICDPipelineActionNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.action.name" semantic conventions. It represents the kind of
+ // action a pipeline run is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BUILD", "RUN", "SYNC"
+ CICDPipelineActionNameKey = attribute.Key("cicd.pipeline.action.name")
+
+ // CICDPipelineNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.name" semantic conventions. It represents the human readable
+ // name of the pipeline within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Build and Test", "Lint", "Deploy Go Project",
+ // "deploy_to_environment"
+ CICDPipelineNameKey = attribute.Key("cicd.pipeline.name")
+
+ // CICDPipelineResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.result" semantic conventions. It represents the result of a
+ // pipeline run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineResultKey = attribute.Key("cicd.pipeline.result")
+
+ // CICDPipelineRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.id" semantic conventions. It represents the unique
+ // identifier of a pipeline run within a CI/CD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "120912"
+ CICDPipelineRunIDKey = attribute.Key("cicd.pipeline.run.id")
+
+ // CICDPipelineRunStateKey is the attribute Key conforming to the
+	// "cicd.pipeline.run.state" semantic conventions. It represents the state of
+	// the pipeline run; a run goes through these states during its lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pending", "executing", "finalizing"
+ CICDPipelineRunStateKey = attribute.Key("cicd.pipeline.run.state")
+
+ // CICDPipelineRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+ // the pipeline run, providing the complete address in order to locate and
+ // identify the pipeline run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineRunURLFullKey = attribute.Key("cicd.pipeline.run.url.full")
+
+ // CICDPipelineTaskNameKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.name" semantic conventions. It represents the human
+ // readable name of a task within a pipeline. Task here most closely aligns with
+ // a [computing process] in a pipeline. Other terms for tasks include commands,
+ // steps, and procedures.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Run GoLang Linter", "Go Build", "go-test", "deploy_binary"
+ //
+ // [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+ CICDPipelineTaskNameKey = attribute.Key("cicd.pipeline.task.name")
+
+ // CICDPipelineTaskRunIDKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+ // identifier of a task run within a pipeline.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "12097"
+ CICDPipelineTaskRunIDKey = attribute.Key("cicd.pipeline.task.run.id")
+
+ // CICDPipelineTaskRunResultKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.result" semantic conventions. It represents the
+ // result of a task run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "timeout", "skipped"
+ CICDPipelineTaskRunResultKey = attribute.Key("cicd.pipeline.task.run.result")
+
+ // CICDPipelineTaskRunURLFullKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+ // [URL] of the pipeline task run, providing the complete address in order to
+ // locate and identify the pipeline task run.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/open-telemetry/semantic-conventions/actions/runs/9753949763/job/26920038674?pr=1075"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDPipelineTaskRunURLFullKey = attribute.Key("cicd.pipeline.task.run.url.full")
+
+ // CICDPipelineTaskTypeKey is the attribute Key conforming to the
+ // "cicd.pipeline.task.type" semantic conventions. It represents the type of the
+ // task within a pipeline.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "build", "test", "deploy"
+ CICDPipelineTaskTypeKey = attribute.Key("cicd.pipeline.task.type")
+
+ // CICDSystemComponentKey is the attribute Key conforming to the
+ // "cicd.system.component" semantic conventions. It represents the name of a
+ // component of the CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "controller", "scheduler", "agent"
+ CICDSystemComponentKey = attribute.Key("cicd.system.component")
+
+ // CICDWorkerIDKey is the attribute Key conforming to the "cicd.worker.id"
+ // semantic conventions. It represents the unique identifier of a worker within
+ // a CICD system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "abc123", "10.0.1.2", "controller"
+ CICDWorkerIDKey = attribute.Key("cicd.worker.id")
+
+ // CICDWorkerNameKey is the attribute Key conforming to the "cicd.worker.name"
+ // semantic conventions. It represents the name of a worker within a CICD
+ // system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "agent-abc", "controller", "Ubuntu LTS"
+ CICDWorkerNameKey = attribute.Key("cicd.worker.name")
+
+ // CICDWorkerStateKey is the attribute Key conforming to the "cicd.worker.state"
+ // semantic conventions. It represents the state of a CICD worker / agent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle", "busy", "down"
+ CICDWorkerStateKey = attribute.Key("cicd.worker.state")
+
+ // CICDWorkerURLFullKey is the attribute Key conforming to the
+ // "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+ // worker, providing the complete address in order to locate and identify the
+ // worker.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://cicd.example.org/worker/abc123"
+ //
+ // [URL]: https://wikipedia.org/wiki/URL
+ CICDWorkerURLFullKey = attribute.Key("cicd.worker.url.full")
+)
+
+// CICDPipelineName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.name" semantic conventions. It represents the human readable
+// name of the pipeline within a CI/CD system.
+func CICDPipelineName(val string) attribute.KeyValue {
+ return CICDPipelineNameKey.String(val)
+}
+
+// CICDPipelineRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.id" semantic conventions. It represents the unique
+// identifier of a pipeline run within a CI/CD system.
+func CICDPipelineRunID(val string) attribute.KeyValue {
+ return CICDPipelineRunIDKey.String(val)
+}
+
+// CICDPipelineRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.run.url.full" semantic conventions. It represents the [URL] of
+// the pipeline run, providing the complete address in order to locate and
+// identify the pipeline run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineRunURLFullKey.String(val)
+}
+
+// CICDPipelineTaskName returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.name" semantic conventions. It represents the human
+// readable name of a task within a pipeline. Task here most closely aligns with
+// a [computing process] in a pipeline. Other terms for tasks include commands,
+// steps, and procedures.
+//
+// [computing process]: https://wikipedia.org/wiki/Pipeline_(computing)
+func CICDPipelineTaskName(val string) attribute.KeyValue {
+ return CICDPipelineTaskNameKey.String(val)
+}
+
+// CICDPipelineTaskRunID returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.id" semantic conventions. It represents the unique
+// identifier of a task run within a pipeline.
+func CICDPipelineTaskRunID(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunIDKey.String(val)
+}
+
+// CICDPipelineTaskRunURLFull returns an attribute KeyValue conforming to the
+// "cicd.pipeline.task.run.url.full" semantic conventions. It represents the
+// [URL] of the pipeline task run, providing the complete address in order to
+// locate and identify the pipeline task run.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDPipelineTaskRunURLFull(val string) attribute.KeyValue {
+ return CICDPipelineTaskRunURLFullKey.String(val)
+}
+
+// CICDSystemComponent returns an attribute KeyValue conforming to the
+// "cicd.system.component" semantic conventions. It represents the name of a
+// component of the CICD system.
+func CICDSystemComponent(val string) attribute.KeyValue {
+ return CICDSystemComponentKey.String(val)
+}
+
+// CICDWorkerID returns an attribute KeyValue conforming to the "cicd.worker.id"
+// semantic conventions. It represents the unique identifier of a worker within a
+// CICD system.
+func CICDWorkerID(val string) attribute.KeyValue {
+ return CICDWorkerIDKey.String(val)
+}
+
+// CICDWorkerName returns an attribute KeyValue conforming to the
+// "cicd.worker.name" semantic conventions. It represents the name of a worker
+// within a CICD system.
+func CICDWorkerName(val string) attribute.KeyValue {
+ return CICDWorkerNameKey.String(val)
+}
+
+// CICDWorkerURLFull returns an attribute KeyValue conforming to the
+// "cicd.worker.url.full" semantic conventions. It represents the [URL] of the
+// worker, providing the complete address in order to locate and identify the
+// worker.
+//
+// [URL]: https://wikipedia.org/wiki/URL
+func CICDWorkerURLFull(val string) attribute.KeyValue {
+ return CICDWorkerURLFullKey.String(val)
+}
+
+// Enum values for cicd.pipeline.action.name
+var (
+ // The pipeline run is executing a build.
+ // Stability: development
+ CICDPipelineActionNameBuild = CICDPipelineActionNameKey.String("BUILD")
+ // The pipeline run is executing.
+ // Stability: development
+ CICDPipelineActionNameRun = CICDPipelineActionNameKey.String("RUN")
+ // The pipeline run is executing a sync.
+ // Stability: development
+ CICDPipelineActionNameSync = CICDPipelineActionNameKey.String("SYNC")
+)
+
+// Enum values for cicd.pipeline.result
+var (
+ // The pipeline run finished successfully.
+ // Stability: development
+ CICDPipelineResultSuccess = CICDPipelineResultKey.String("success")
+ // The pipeline run did not finish successfully, eg. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the pipeline run.
+ // Stability: development
+ CICDPipelineResultFailure = CICDPipelineResultKey.String("failure")
+ // The pipeline run failed due to an error in the CICD system, eg. due to the
+ // worker being killed.
+ // Stability: development
+ CICDPipelineResultError = CICDPipelineResultKey.String("error")
+ // A timeout caused the pipeline run to be interrupted.
+ // Stability: development
+ CICDPipelineResultTimeout = CICDPipelineResultKey.String("timeout")
+ // The pipeline run was cancelled, eg. by a user manually cancelling the
+ // pipeline run.
+ // Stability: development
+ CICDPipelineResultCancellation = CICDPipelineResultKey.String("cancellation")
+ // The pipeline run was skipped, eg. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineResultSkip = CICDPipelineResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.run.state
+var (
+ // The run pending state spans from the event triggering the pipeline run until
+ // the execution of the run starts (eg. time spent in a queue, provisioning
+ // agents, creating run resources).
+ //
+ // Stability: development
+ CICDPipelineRunStatePending = CICDPipelineRunStateKey.String("pending")
+ // The executing state spans the execution of any run tasks (eg. build, test).
+ // Stability: development
+ CICDPipelineRunStateExecuting = CICDPipelineRunStateKey.String("executing")
+ // The finalizing state spans from when the run has finished executing (eg.
+ // cleanup of run resources).
+ // Stability: development
+ CICDPipelineRunStateFinalizing = CICDPipelineRunStateKey.String("finalizing")
+)
+
+// Enum values for cicd.pipeline.task.run.result
+var (
+ // The task run finished successfully.
+ // Stability: development
+ CICDPipelineTaskRunResultSuccess = CICDPipelineTaskRunResultKey.String("success")
+ // The task run did not finish successfully, eg. due to a compile error or a
+ // failing test. Such failures are usually detected by non-zero exit codes of
+ // the tools executed in the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultFailure = CICDPipelineTaskRunResultKey.String("failure")
+ // The task run failed due to an error in the CICD system, eg. due to the worker
+ // being killed.
+ // Stability: development
+ CICDPipelineTaskRunResultError = CICDPipelineTaskRunResultKey.String("error")
+ // A timeout caused the task run to be interrupted.
+ // Stability: development
+ CICDPipelineTaskRunResultTimeout = CICDPipelineTaskRunResultKey.String("timeout")
+ // The task run was cancelled, eg. by a user manually cancelling the task run.
+ // Stability: development
+ CICDPipelineTaskRunResultCancellation = CICDPipelineTaskRunResultKey.String("cancellation")
+ // The task run was skipped, eg. due to a precondition not being met.
+ // Stability: development
+ CICDPipelineTaskRunResultSkip = CICDPipelineTaskRunResultKey.String("skip")
+)
+
+// Enum values for cicd.pipeline.task.type
+var (
+ // build
+ // Stability: development
+ CICDPipelineTaskTypeBuild = CICDPipelineTaskTypeKey.String("build")
+ // test
+ // Stability: development
+ CICDPipelineTaskTypeTest = CICDPipelineTaskTypeKey.String("test")
+ // deploy
+ // Stability: development
+ CICDPipelineTaskTypeDeploy = CICDPipelineTaskTypeKey.String("deploy")
+)
+
+// Enum values for cicd.worker.state
+var (
+ // The worker is not performing work for the CICD system. It is available to the
+ // CICD system to perform work on (online / idle).
+ // Stability: development
+ CICDWorkerStateAvailable = CICDWorkerStateKey.String("available")
+ // The worker is performing work for the CICD system.
+ // Stability: development
+ CICDWorkerStateBusy = CICDWorkerStateKey.String("busy")
+ // The worker is not available to the CICD system (disconnected / down).
+ // Stability: development
+ CICDWorkerStateOffline = CICDWorkerStateKey.String("offline")
+)
+
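+// Usage sketch (illustrative only): the CICD enum variables are plain
+// attribute.KeyValue values, so instrumentation can pick between them at
+// runtime. The helper below is hypothetical and not part of this package:
+//
+//	func pipelineRunAttrs(name, runID string, err error) []attribute.KeyValue {
+//		result := semconv.CICDPipelineResultSuccess
+//		if err != nil {
+//			result = semconv.CICDPipelineResultFailure
+//		}
+//		return []attribute.KeyValue{
+//			semconv.CICDPipelineName(name),
+//			semconv.CICDPipelineRunID(runID),
+//			result,
+//		}
+//	}
+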
+// Namespace: client
+const (
+ // ClientAddressKey is the attribute Key conforming to the "client.address"
+ // semantic conventions. It represents the client address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "client.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.address` SHOULD represent the client address behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientAddressKey = attribute.Key("client.address")
+
+ // ClientPortKey is the attribute Key conforming to the "client.port" semantic
+ // conventions. It represents the client port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ // Note: When observed from the server side, and when communicating through an
+ // intermediary, `client.port` SHOULD represent the client port behind any
+ // intermediaries, for example proxies, if it's available.
+ ClientPortKey = attribute.Key("client.port")
+)
+
+// ClientAddress returns an attribute KeyValue conforming to the "client.address"
+// semantic conventions. It represents the client address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func ClientAddress(val string) attribute.KeyValue {
+ return ClientAddressKey.String(val)
+}
+
+// ClientPort returns an attribute KeyValue conforming to the "client.port"
+// semantic conventions. It represents the client port number.
+func ClientPort(val int) attribute.KeyValue {
+ return ClientPortKey.Int(val)
+}
+
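+// Usage sketch (illustrative only): for a server span, client.address and
+// client.port are often derived from the peer address using the standard net
+// and strconv packages. This helper is hypothetical and ignores intermediaries
+// such as proxies, which the notes above say SHOULD be accounted for when
+// possible:
+//
+//	func clientAttrs(remoteAddr string) []attribute.KeyValue {
+//		host, portStr, err := net.SplitHostPort(remoteAddr)
+//		if err != nil {
+//			return []attribute.KeyValue{semconv.ClientAddress(remoteAddr)}
+//		}
+//		port, _ := strconv.Atoi(portStr)
+//		return []attribute.KeyValue{
+//			semconv.ClientAddress(host),
+//			semconv.ClientPort(port),
+//		}
+//	}
+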
+// Namespace: cloud
+const (
+ // CloudAccountIDKey is the attribute Key conforming to the "cloud.account.id"
+ // semantic conventions. It represents the cloud account ID the resource is
+ // assigned to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "111111111111", "opentelemetry"
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+
+ // CloudAvailabilityZoneKey is the attribute Key conforming to the
+	// "cloud.availability_zone" semantic conventions. It represents the
+	// availability zone where the resource is running. Cloud regions often have
+	// multiple, isolated locations known as zones to increase availability.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-east-1c"
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google
+ // Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+
+ // CloudPlatformKey is the attribute Key conforming to the "cloud.platform"
+ // semantic conventions. It represents the cloud platform in use.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform")
+
+ // CloudProviderKey is the attribute Key conforming to the "cloud.provider"
+ // semantic conventions. It represents the name of the cloud provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ CloudProviderKey = attribute.Key("cloud.provider")
+
+ // CloudRegionKey is the attribute Key conforming to the "cloud.region" semantic
+ // conventions. It represents the geographical region within a cloud provider.
+ // When associated with a resource, this attribute specifies the region where
+ // the resource operates. When calling services or APIs deployed on a cloud,
+ // this attribute identifies the region where the called destination is
+ // deployed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1", "us-east-1"
+ // Note: Refer to your provider's docs to see the available regions, for example
+ // [Alibaba Cloud regions], [AWS regions], [Azure regions],
+ // [Google Cloud regions], or [Tencent Cloud regions].
+ //
+ // [Alibaba Cloud regions]: https://www.alibabacloud.com/help/doc-detail/40654.htm
+ // [AWS regions]: https://aws.amazon.com/about-aws/global-infrastructure/regions_az/
+ // [Azure regions]: https://azure.microsoft.com/global-infrastructure/geographies/
+ // [Google Cloud regions]: https://cloud.google.com/about/locations
+ // [Tencent Cloud regions]: https://www.tencentcloud.com/document/product/213/6091
+ CloudRegionKey = attribute.Key("cloud.region")
+
+ // CloudResourceIDKey is the attribute Key conforming to the "cloud.resource_id"
+ // semantic conventions. It represents the cloud provider-specific native
+ // identifier of the monitored cloud resource (e.g. an [ARN] on AWS, a
+ // [fully qualified resource ID] on Azure, a [full resource name] on GCP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "arn:aws:lambda:REGION:ACCOUNT_ID:function:my-function",
+ // "//run.googleapis.com/projects/PROJECT_ID/locations/LOCATION_ID/services/SERVICE_ID",
+	// "/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>"
+ // Note: On some cloud providers, it may not be possible to determine the full
+ // ID at startup,
+ // so it may be necessary to set `cloud.resource_id` as a span attribute
+ // instead.
+ //
+ // The exact value to use for `cloud.resource_id` depends on the cloud provider.
+ // The following well-known definitions MUST be used if you set this attribute
+ // and they apply:
+ //
+ // - **AWS Lambda:** The function [ARN].
+ // Take care not to use the "invoked ARN" directly but replace any
+ // [alias suffix]
+ // with the resolved function version, as the same runtime instance may be
+ // invocable with
+ // multiple different aliases.
+ // - **GCP:** The [URI of the resource]
+ // - **Azure:** The [Fully Qualified Resource ID] of the invoked function,
+ // *not* the function app, having the form
+	//
+	//     `/subscriptions/<SUBSCRIPTION_GUID>/resourceGroups/<RG>/providers/Microsoft.Web/sites/<FUNCAPP>/functions/<FUNC>`.
+ // This means that a span attribute MUST be used, as an Azure function app
+ // can host multiple functions that would usually share
+ // a TracerProvider.
+ //
+ //
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ // [full resource name]: https://google.aip.dev/122#full-resource-names
+ // [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+ // [alias suffix]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-aliases.html
+ // [URI of the resource]: https://cloud.google.com/iam/docs/full-resource-names
+ // [Fully Qualified Resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+ CloudResourceIDKey = attribute.Key("cloud.resource_id")
+)
+
+// CloudAccountID returns an attribute KeyValue conforming to the
+// "cloud.account.id" semantic conventions. It represents the cloud account ID
+// the resource is assigned to.
+func CloudAccountID(val string) attribute.KeyValue {
+ return CloudAccountIDKey.String(val)
+}
+
+// CloudAvailabilityZone returns an attribute KeyValue conforming to the
+// "cloud.availability_zone" semantic conventions. It represents the cloud
+// regions often have multiple, isolated locations known as zones to increase
+// availability. Availability zone represents the zone where the resource is
+// running.
+func CloudAvailabilityZone(val string) attribute.KeyValue {
+ return CloudAvailabilityZoneKey.String(val)
+}
+
+// CloudRegion returns an attribute KeyValue conforming to the "cloud.region"
+// semantic conventions. It represents the geographical region within a cloud
+// provider. When associated with a resource, this attribute specifies the region
+// where the resource operates. When calling services or APIs deployed on a
+// cloud, this attribute identifies the region where the called destination is
+// deployed.
+func CloudRegion(val string) attribute.KeyValue {
+ return CloudRegionKey.String(val)
+}
+
+// CloudResourceID returns an attribute KeyValue conforming to the
+// "cloud.resource_id" semantic conventions. It represents the cloud
+// provider-specific native identifier of the monitored cloud resource (e.g. an
+// [ARN] on AWS, a [fully qualified resource ID] on Azure, a [full resource name]
+// on GCP).
+//
+// [ARN]: https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html
+// [fully qualified resource ID]: https://learn.microsoft.com/rest/api/resources/resources/get-by-id
+// [full resource name]: https://google.aip.dev/122#full-resource-names
+func CloudResourceID(val string) attribute.KeyValue {
+ return CloudResourceIDKey.String(val)
+}
+
+// Enum values for cloud.platform
+var (
+ // Alibaba Cloud Elastic Compute Service
+ // Stability: development
+ CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs")
+ // Alibaba Cloud Function Compute
+ // Stability: development
+ CloudPlatformAlibabaCloudFC = CloudPlatformKey.String("alibaba_cloud_fc")
+ // Red Hat OpenShift on Alibaba Cloud
+ // Stability: development
+ CloudPlatformAlibabaCloudOpenShift = CloudPlatformKey.String("alibaba_cloud_openshift")
+ // AWS Elastic Compute Cloud
+ // Stability: development
+ CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2")
+ // AWS Elastic Container Service
+ // Stability: development
+ CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs")
+ // AWS Elastic Kubernetes Service
+ // Stability: development
+ CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks")
+ // AWS Lambda
+ // Stability: development
+ CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda")
+ // AWS Elastic Beanstalk
+ // Stability: development
+ CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk")
+ // AWS App Runner
+ // Stability: development
+ CloudPlatformAWSAppRunner = CloudPlatformKey.String("aws_app_runner")
+ // Red Hat OpenShift on AWS (ROSA)
+ // Stability: development
+ CloudPlatformAWSOpenShift = CloudPlatformKey.String("aws_openshift")
+ // Azure Virtual Machines
+ // Stability: development
+ CloudPlatformAzureVM = CloudPlatformKey.String("azure.vm")
+ // Azure Container Apps
+ // Stability: development
+ CloudPlatformAzureContainerApps = CloudPlatformKey.String("azure.container_apps")
+ // Azure Container Instances
+ // Stability: development
+ CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure.container_instances")
+ // Azure Kubernetes Service
+ // Stability: development
+ CloudPlatformAzureAKS = CloudPlatformKey.String("azure.aks")
+ // Azure Functions
+ // Stability: development
+ CloudPlatformAzureFunctions = CloudPlatformKey.String("azure.functions")
+ // Azure App Service
+ // Stability: development
+ CloudPlatformAzureAppService = CloudPlatformKey.String("azure.app_service")
+ // Azure Red Hat OpenShift
+ // Stability: development
+ CloudPlatformAzureOpenShift = CloudPlatformKey.String("azure.openshift")
+ // Google Bare Metal Solution (BMS)
+ // Stability: development
+ CloudPlatformGCPBareMetalSolution = CloudPlatformKey.String("gcp_bare_metal_solution")
+ // Google Cloud Compute Engine (GCE)
+ // Stability: development
+ CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine")
+ // Google Cloud Run
+ // Stability: development
+ CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run")
+ // Google Cloud Kubernetes Engine (GKE)
+ // Stability: development
+ CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine")
+ // Google Cloud Functions (GCF)
+ // Stability: development
+ CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions")
+ // Google Cloud App Engine (GAE)
+ // Stability: development
+ CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine")
+ // Red Hat OpenShift on Google Cloud
+ // Stability: development
+ CloudPlatformGCPOpenShift = CloudPlatformKey.String("gcp_openshift")
+ // Red Hat OpenShift on IBM Cloud
+ // Stability: development
+ CloudPlatformIBMCloudOpenShift = CloudPlatformKey.String("ibm_cloud_openshift")
+ // Compute on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudCompute = CloudPlatformKey.String("oracle_cloud_compute")
+ // Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudPlatformOracleCloudOKE = CloudPlatformKey.String("oracle_cloud_oke")
+ // Tencent Cloud Cloud Virtual Machine (CVM)
+ // Stability: development
+ CloudPlatformTencentCloudCVM = CloudPlatformKey.String("tencent_cloud_cvm")
+ // Tencent Cloud Elastic Kubernetes Service (EKS)
+ // Stability: development
+ CloudPlatformTencentCloudEKS = CloudPlatformKey.String("tencent_cloud_eks")
+ // Tencent Cloud Serverless Cloud Function (SCF)
+ // Stability: development
+ CloudPlatformTencentCloudSCF = CloudPlatformKey.String("tencent_cloud_scf")
+)
+
+// Enum values for cloud.provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ CloudProviderAWS = CloudProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ CloudProviderAzure = CloudProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ CloudProviderGCP = CloudProviderKey.String("gcp")
+ // Heroku Platform as a Service
+ // Stability: development
+ CloudProviderHeroku = CloudProviderKey.String("heroku")
+ // IBM Cloud
+ // Stability: development
+ CloudProviderIBMCloud = CloudProviderKey.String("ibm_cloud")
+ // Oracle Cloud Infrastructure (OCI)
+ // Stability: development
+ CloudProviderOracleCloud = CloudProviderKey.String("oracle_cloud")
+ // Tencent Cloud
+ // Stability: development
+ CloudProviderTencentCloud = CloudProviderKey.String("tencent_cloud")
+)
+
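+// Usage sketch (illustrative only): cloud attributes are usually set on the SDK
+// resource rather than on individual spans. The sdk/resource calls and the
+// literal values below are assumptions for the example; the exact values depend
+// on the environment being described:
+//
+//	res, err := resource.New(ctx,
+//		resource.WithAttributes(
+//			semconv.CloudProviderAWS,
+//			semconv.CloudPlatformAWSEKS,
+//			semconv.CloudRegion("us-east-1"),
+//			semconv.CloudAccountID("111111111111"),
+//		))
+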
+// Namespace: cloudevents
+const (
+ // CloudEventsEventIDKey is the attribute Key conforming to the
+	// "cloudevents.event_id" semantic conventions. It represents the [event_id],
+	// which uniquely identifies the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123e4567-e89b-12d3-a456-426614174000", "0001"
+ //
+ // [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+ CloudEventsEventIDKey = attribute.Key("cloudevents.event_id")
+
+ // CloudEventsEventSourceKey is the attribute Key conforming to the
+	// "cloudevents.event_source" semantic conventions. It represents the [source],
+	// which identifies the context in which an event happened.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://github.com/cloudevents", "/cloudevents/spec/pull/123",
+ // "my-service"
+ //
+ // [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+ CloudEventsEventSourceKey = attribute.Key("cloudevents.event_source")
+
+ // CloudEventsEventSpecVersionKey is the attribute Key conforming to the
+ // "cloudevents.event_spec_version" semantic conventions. It represents the
+ // [version of the CloudEvents specification] which the event uses.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ //
+ // [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+ CloudEventsEventSpecVersionKey = attribute.Key("cloudevents.event_spec_version")
+
+ // CloudEventsEventSubjectKey is the attribute Key conforming to the
+ // "cloudevents.event_subject" semantic conventions. It represents the [subject]
+ // of the event in the context of the event producer (identified by source).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: mynewfile.jpg
+ //
+ // [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+ CloudEventsEventSubjectKey = attribute.Key("cloudevents.event_subject")
+
+ // CloudEventsEventTypeKey is the attribute Key conforming to the
+	// "cloudevents.event_type" semantic conventions. It represents the
+	// [event_type], which contains a value describing the type of event related
+	// to the originating occurrence.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com.github.pull_request.opened", "com.example.object.deleted.v2"
+ //
+ // [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+ CloudEventsEventTypeKey = attribute.Key("cloudevents.event_type")
+)
+
+// CloudEventsEventID returns an attribute KeyValue conforming to the
+// "cloudevents.event_id" semantic conventions. It represents the [event_id]
+// uniquely identifies the event.
+//
+// [event_id]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#id
+func CloudEventsEventID(val string) attribute.KeyValue {
+ return CloudEventsEventIDKey.String(val)
+}
+
+// CloudEventsEventSource returns an attribute KeyValue conforming to the
+// "cloudevents.event_source" semantic conventions. It represents the [source]
+// identifies the context in which an event happened.
+//
+// [source]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1
+func CloudEventsEventSource(val string) attribute.KeyValue {
+ return CloudEventsEventSourceKey.String(val)
+}
+
+// CloudEventsEventSpecVersion returns an attribute KeyValue conforming to the
+// "cloudevents.event_spec_version" semantic conventions. It represents the
+// [version of the CloudEvents specification] which the event uses.
+//
+// [version of the CloudEvents specification]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#specversion
+func CloudEventsEventSpecVersion(val string) attribute.KeyValue {
+ return CloudEventsEventSpecVersionKey.String(val)
+}
+
+// CloudEventsEventSubject returns an attribute KeyValue conforming to the
+// "cloudevents.event_subject" semantic conventions. It represents the [subject]
+// of the event in the context of the event producer (identified by source).
+//
+// [subject]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#subject
+func CloudEventsEventSubject(val string) attribute.KeyValue {
+ return CloudEventsEventSubjectKey.String(val)
+}
+
+// CloudEventsEventType returns an attribute KeyValue conforming to the
+// "cloudevents.event_type" semantic conventions. It represents the [event_type]
+// contains a value describing the type of event related to the originating
+// occurrence.
+//
+// [event_type]: https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type
+func CloudEventsEventType(val string) attribute.KeyValue {
+ return CloudEventsEventTypeKey.String(val)
+}
+
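+// Usage sketch (illustrative only): when instrumenting a CloudEvents producer or
+// consumer, the event metadata can be copied onto the messaging span. The field
+// values below are assumptions taken from the examples above:
+//
+//	span.SetAttributes(
+//		semconv.CloudEventsEventID("123e4567-e89b-12d3-a456-426614174000"),
+//		semconv.CloudEventsEventSource("/cloudevents/spec/pull/123"),
+//		semconv.CloudEventsEventSpecVersion("1.0"),
+//		semconv.CloudEventsEventType("com.github.pull_request.opened"),
+//	)
+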
+// Namespace: cloudfoundry
+const (
+ // CloudFoundryAppIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_id`. This is the same value as
+ // reported by `cf app --guid`.
+ CloudFoundryAppIDKey = attribute.Key("cloudfoundry.app.id")
+
+ // CloudFoundryAppInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+ // of the application instance. 0 when just one instance is active.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0", "1"
+	// Note: CloudFoundry defines the `instance_id` in the
+	// [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the application instance index for applications
+ // deployed on the runtime.
+ //
+ // Application instrumentation should use the value from environment
+ // variable `CF_INSTANCE_INDEX`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ CloudFoundryAppInstanceIDKey = attribute.Key("cloudfoundry.app.instance.id")
+
+ // CloudFoundryAppNameKey is the attribute Key conforming to the
+ // "cloudfoundry.app.name" semantic conventions. It represents the name of the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-app-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.application_name`. This is the same value
+ // as reported by `cf apps`.
+ CloudFoundryAppNameKey = attribute.Key("cloudfoundry.app.name")
+
+ // CloudFoundryOrgIDKey is the attribute Key conforming to the
+ // "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+ // CloudFoundry org the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_id`. This is the same value as
+ // reported by `cf org --guid`.
+ CloudFoundryOrgIDKey = attribute.Key("cloudfoundry.org.id")
+
+ // CloudFoundryOrgNameKey is the attribute Key conforming to the
+ // "cloudfoundry.org.name" semantic conventions. It represents the name of the
+ // CloudFoundry organization the app is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.org_name`. This is the same value as
+ // reported by `cf orgs`.
+ CloudFoundryOrgNameKey = attribute.Key("cloudfoundry.org.name")
+
+ // CloudFoundryProcessIDKey is the attribute Key conforming to the
+ // "cloudfoundry.process.id" semantic conventions. It represents the UID
+ // identifying the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.process_id`. It is supposed to be equal to
+ // `VCAP_APPLICATION.app_id` for applications deployed to the runtime.
+ // For system components, this could be the actual PID.
+ CloudFoundryProcessIDKey = attribute.Key("cloudfoundry.process.id")
+
+ // CloudFoundryProcessTypeKey is the attribute Key conforming to the
+ // "cloudfoundry.process.type" semantic conventions. It represents the type of
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "web"
+ // Note: CloudFoundry applications can consist of multiple jobs. Usually the
+ // main process will be of type `web`. There can be additional background
+ // tasks or side-cars with different process types.
+ CloudFoundryProcessTypeKey = attribute.Key("cloudfoundry.process.type")
+
+ // CloudFoundrySpaceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_id`. This is the same value as
+ // reported by `cf space --guid`.
+ CloudFoundrySpaceIDKey = attribute.Key("cloudfoundry.space.id")
+
+ // CloudFoundrySpaceNameKey is the attribute Key conforming to the
+ // "cloudfoundry.space.name" semantic conventions. It represents the name of the
+ // CloudFoundry space the application is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-space-name"
+ // Note: Application instrumentation should use the value from environment
+ // variable `VCAP_APPLICATION.space_name`. This is the same value as
+ // reported by `cf spaces`.
+ CloudFoundrySpaceNameKey = attribute.Key("cloudfoundry.space.name")
+
+ // CloudFoundrySystemIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.id" semantic conventions. It represents a guid or
+ // another name describing the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cf/gorouter"
+ // Note: CloudFoundry defines the `source_id` in the [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the component name, e.g. "gorouter", for
+ // CloudFoundry components.
+ //
+ // When system components are instrumented, values from the
+ // [Bosh spec]
+ // should be used. The `system.id` should be set to
+ // `spec.deployment/spec.name`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemIDKey = attribute.Key("cloudfoundry.system.id")
+
+ // CloudFoundrySystemInstanceIDKey is the attribute Key conforming to the
+ // "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+ // describing the concrete instance of the event source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+	// Note: CloudFoundry defines the `instance_id` in the
+	// [Loggregator v2 envelope].
+ // It is used for logs and metrics emitted by CloudFoundry. It is
+ // supposed to contain the vm id for CloudFoundry components.
+ //
+ // When system components are instrumented, values from the
+ // [Bosh spec]
+ // should be used. The `system.instance.id` should be set to `spec.id`.
+ //
+ // [Loggregator v2 envelope]: https://github.com/cloudfoundry/loggregator-api#v2-envelope
+ // [Bosh spec]: https://bosh.io/docs/jobs/#properties-spec
+ CloudFoundrySystemInstanceIDKey = attribute.Key("cloudfoundry.system.instance.id")
+)
+
+// CloudFoundryAppID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.id" semantic conventions. It represents the guid of the
+// application.
+func CloudFoundryAppID(val string) attribute.KeyValue {
+ return CloudFoundryAppIDKey.String(val)
+}
+
+// CloudFoundryAppInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.instance.id" semantic conventions. It represents the index
+// of the application instance. 0 when just one instance is active.
+func CloudFoundryAppInstanceID(val string) attribute.KeyValue {
+ return CloudFoundryAppInstanceIDKey.String(val)
+}
+
+// CloudFoundryAppName returns an attribute KeyValue conforming to the
+// "cloudfoundry.app.name" semantic conventions. It represents the name of the
+// application.
+func CloudFoundryAppName(val string) attribute.KeyValue {
+ return CloudFoundryAppNameKey.String(val)
+}
+
+// CloudFoundryOrgID returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.id" semantic conventions. It represents the guid of the
+// CloudFoundry org the application is running in.
+func CloudFoundryOrgID(val string) attribute.KeyValue {
+ return CloudFoundryOrgIDKey.String(val)
+}
+
+// CloudFoundryOrgName returns an attribute KeyValue conforming to the
+// "cloudfoundry.org.name" semantic conventions. It represents the name of the
+// CloudFoundry organization the app is running in.
+func CloudFoundryOrgName(val string) attribute.KeyValue {
+ return CloudFoundryOrgNameKey.String(val)
+}
+
+// CloudFoundryProcessID returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.id" semantic conventions. It represents the UID
+// identifying the process.
+func CloudFoundryProcessID(val string) attribute.KeyValue {
+ return CloudFoundryProcessIDKey.String(val)
+}
+
+// CloudFoundryProcessType returns an attribute KeyValue conforming to the
+// "cloudfoundry.process.type" semantic conventions. It represents the type of
+// process.
+func CloudFoundryProcessType(val string) attribute.KeyValue {
+ return CloudFoundryProcessTypeKey.String(val)
+}
+
+// CloudFoundrySpaceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.id" semantic conventions. It represents the guid of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceID(val string) attribute.KeyValue {
+ return CloudFoundrySpaceIDKey.String(val)
+}
+
+// CloudFoundrySpaceName returns an attribute KeyValue conforming to the
+// "cloudfoundry.space.name" semantic conventions. It represents the name of the
+// CloudFoundry space the application is running in.
+func CloudFoundrySpaceName(val string) attribute.KeyValue {
+ return CloudFoundrySpaceNameKey.String(val)
+}
+
+// CloudFoundrySystemID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.id" semantic conventions. It represents a guid or another
+// name describing the event source.
+func CloudFoundrySystemID(val string) attribute.KeyValue {
+ return CloudFoundrySystemIDKey.String(val)
+}
+
+// CloudFoundrySystemInstanceID returns an attribute KeyValue conforming to the
+// "cloudfoundry.system.instance.id" semantic conventions. It represents a guid
+// describing the concrete instance of the event source.
+func CloudFoundrySystemInstanceID(val string) attribute.KeyValue {
+ return CloudFoundrySystemInstanceIDKey.String(val)
+}
+
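+// Usage sketch (illustrative only): per the notes above, application
+// instrumentation reads most of these values from the CloudFoundry environment.
+// This hypothetical helper covers only the instance index, which is exposed
+// directly via the CF_INSTANCE_INDEX environment variable (the other values
+// live inside the VCAP_APPLICATION JSON document and would need parsing):
+//
+//	func cfInstanceAttrs() []attribute.KeyValue {
+//		var attrs []attribute.KeyValue
+//		if idx := os.Getenv("CF_INSTANCE_INDEX"); idx != "" {
+//			attrs = append(attrs, semconv.CloudFoundryAppInstanceID(idx))
+//		}
+//		return attrs
+//	}
+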
+// Namespace: code
+const (
+ // CodeColumnNumberKey is the attribute Key conforming to the
+ // "code.column.number" semantic conventions. It represents the column number in
+ // `code.file.path` best representing the operation. It SHOULD point within the
+ // code unit named in `code.function.name`. This attribute MUST NOT be used on
+ // the Profile signal since the data is already captured in 'message Line'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeColumnNumberKey = attribute.Key("code.column.number")
+
+ // CodeFilePathKey is the attribute Key conforming to the "code.file.path"
+ // semantic conventions. It represents the source code file name that identifies
+ // the code unit as uniquely as possible (preferably an absolute file path).
+ // This attribute MUST NOT be used on the Profile signal since the data is
+ // already captured in 'message Function'. This constraint is imposed to prevent
+ // redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: /usr/local/MyApplication/content_root/app/index.php
+ CodeFilePathKey = attribute.Key("code.file.path")
+
+ // CodeFunctionNameKey is the attribute Key conforming to the
+ // "code.function.name" semantic conventions. It represents the method or
+ // function fully-qualified name without arguments. The value should fit the
+ // natural representation of the language runtime, which is also likely the same
+ // used within `code.stacktrace` attribute value. This attribute MUST NOT be
+ // used on the Profile signal since the data is already captured in 'message
+ // Function'. This constraint is imposed to prevent redundancy and maintain data
+ // integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "com.example.MyHttpService.serveRequest",
+ // "GuzzleHttp\Client::transfer", "fopen"
+ // Note: Values and format depends on each language runtime, thus it is
+ // impossible to provide an exhaustive list of examples.
+ // The values are usually the same (or prefixes of) the ones found in native
+ // stack trace representation stored in
+ // `code.stacktrace` without information on arguments.
+ //
+ // Examples:
+ //
+ // - Java method: `com.example.MyHttpService.serveRequest`
+ // - Java anonymous class method: `com.mycompany.Main$1.myMethod`
+ // - Java lambda method:
+ // `com.mycompany.Main$$Lambda/0x0000748ae4149c00.myMethod`
+ // - PHP function: `GuzzleHttp\Client::transfer`
+ // - Go function: `github.com/my/repo/pkg.foo.func5`
+ // - Elixir: `OpenTelemetry.Ctx.new`
+ // - Erlang: `opentelemetry_ctx:new`
+ // - Rust: `playground::my_module::my_cool_func`
+ // - C function: `fopen`
+ CodeFunctionNameKey = attribute.Key("code.function.name")
+
+ // CodeLineNumberKey is the attribute Key conforming to the "code.line.number"
+ // semantic conventions. It represents the line number in `code.file.path` best
+ // representing the operation. It SHOULD point within the code unit named in
+ // `code.function.name`. This attribute MUST NOT be used on the Profile signal
+ // since the data is already captured in 'message Line'. This constraint is
+ // imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ CodeLineNumberKey = attribute.Key("code.line.number")
+
+ // CodeStacktraceKey is the attribute Key conforming to the "code.stacktrace"
+ // semantic conventions. It represents a stacktrace as a string in the natural
+ // representation for the language runtime. The representation is identical to
+ // [`exception.stacktrace`]. This attribute MUST NOT be used on the Profile
+ // signal since the data is already captured in 'message Location'. This
+ // constraint is imposed to prevent redundancy and maintain data integrity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ //
+ // [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+ CodeStacktraceKey = attribute.Key("code.stacktrace")
+)
+
+// CodeColumnNumber returns an attribute KeyValue conforming to the
+// "code.column.number" semantic conventions. It represents the column number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeColumnNumber(val int) attribute.KeyValue {
+ return CodeColumnNumberKey.Int(val)
+}
+
+// CodeFilePath returns an attribute KeyValue conforming to the "code.file.path"
+// semantic conventions. It represents the source code file name that identifies
+// the code unit as uniquely as possible (preferably an absolute file path). This
+// attribute MUST NOT be used on the Profile signal since the data is already
+// captured in 'message Function'. This constraint is imposed to prevent
+// redundancy and maintain data integrity.
+func CodeFilePath(val string) attribute.KeyValue {
+ return CodeFilePathKey.String(val)
+}
+
+// CodeFunctionName returns an attribute KeyValue conforming to the
+// "code.function.name" semantic conventions. It represents the method or
+// function fully-qualified name without arguments. The value should fit the
+// natural representation of the language runtime, which is also likely the same
+// used within `code.stacktrace` attribute value. This attribute MUST NOT be used
+// on the Profile signal since the data is already captured in 'message
+// Function'. This constraint is imposed to prevent redundancy and maintain data
+// integrity.
+func CodeFunctionName(val string) attribute.KeyValue {
+ return CodeFunctionNameKey.String(val)
+}
+
+// CodeLineNumber returns an attribute KeyValue conforming to the
+// "code.line.number" semantic conventions. It represents the line number in
+// `code.file.path` best representing the operation. It SHOULD point within the
+// code unit named in `code.function.name`. This attribute MUST NOT be used on
+// the Profile signal since the data is already captured in 'message Line'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+func CodeLineNumber(val int) attribute.KeyValue {
+ return CodeLineNumberKey.Int(val)
+}
+
+// CodeStacktrace returns an attribute KeyValue conforming to the
+// "code.stacktrace" semantic conventions. It represents a stacktrace as a string
+// in the natural representation for the language runtime. The representation is
+// identical to [`exception.stacktrace`]. This attribute MUST NOT be used on the
+// Profile signal since the data is already captured in 'message Location'. This
+// constraint is imposed to prevent redundancy and maintain data integrity.
+//
+// [`exception.stacktrace`]: /docs/exceptions/exceptions-spans.md#stacktrace-representation
+func CodeStacktrace(val string) attribute.KeyValue {
+ return CodeStacktraceKey.String(val)
+}
+
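+// Illustrative sketch (not part of the upstream generated conventions): the
+// code.* helpers above compose into a plain attribute set that can be attached
+// to a span or log record. The file path, function name, and line number below
+// are hypothetical values.
+func exampleCodeLocationAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// Source location best representing the instrumented operation.
+		CodeFilePath("/srv/app/handler.go"),
+		CodeFunctionName("main.handleRequest"),
+		CodeLineNumber(42),
+	}
+}
+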
+// Namespace: container
+const (
+ // ContainerCommandKey is the attribute Key conforming to the
+ // "container.command" semantic conventions. It represents the command used to
+ // run the container (i.e. the command name).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol"
+ // Note: If using embedded credentials or sensitive data, it is recommended to
+ // remove them to prevent potential leakage.
+ ContainerCommandKey = attribute.Key("container.command")
+
+ // ContainerCommandArgsKey is the attribute Key conforming to the
+ // "container.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) run by the
+ // container.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol", "--config", "config.yaml"
+ ContainerCommandArgsKey = attribute.Key("container.command_args")
+
+ // ContainerCommandLineKey is the attribute Key conforming to the
+ // "container.command_line" semantic conventions. It represents the full command
+ // run by the container as a single string representing the full command.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcontribcol --config config.yaml"
+ ContainerCommandLineKey = attribute.Key("container.command_line")
+
+ // ContainerCSIPluginNameKey is the attribute Key conforming to the
+ // "container.csi.plugin.name" semantic conventions. It represents the name of
+ // the CSI ([Container Storage Interface]) plugin used by the volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pd.csi.storage.gke.io"
+ // Note: This can sometimes be referred to as a "driver" in CSI implementations.
+ // This should represent the `name` field of the GetPluginInfo RPC.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIPluginNameKey = attribute.Key("container.csi.plugin.name")
+
+ // ContainerCSIVolumeIDKey is the attribute Key conforming to the
+ // "container.csi.volume.id" semantic conventions. It represents the unique
+ // volume ID returned by the CSI ([Container Storage Interface]) plugin.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-gcp-project/zones/my-gcp-zone/disks/my-gcp-disk"
+ // Note: This can sometimes be referred to as a "volume handle" in CSI
+ // implementations. This should represent the `Volume.volume_id` field in CSI
+ // spec.
+ //
+ // [Container Storage Interface]: https://github.com/container-storage-interface/spec
+ ContainerCSIVolumeIDKey = attribute.Key("container.csi.volume.id")
+
+ // ContainerIDKey is the attribute Key conforming to the "container.id" semantic
+ // conventions. It represents the container ID. Usually a UUID, as for example
+ // used to [identify Docker containers]. The UUID might be abbreviated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a3bf90e006b2"
+ //
+ // [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+ ContainerIDKey = attribute.Key("container.id")
+
+ // ContainerImageIDKey is the attribute Key conforming to the
+ // "container.image.id" semantic conventions. It represents the runtime specific
+ // image identifier. Usually a hash algorithm followed by a UUID.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:19c92d0a00d1b66d897bceaa7319bee0dd38a10a851c60bcec9474aa3f01e50f"
+ // Note: Docker defines a sha256 of the image id; `container.image.id`
+ // corresponds to the `Image` field from the Docker container inspect [API]
+ // endpoint.
+ // K8s defines a link to the container registry repository with digest
+ // `"imageID": "registry.azurecr.io /namespace/service/dockerfile@sha256:bdeabd40c3a8a492eaf9e8e44d0ebbb84bac7ee25ac0cf8a7159d25f62555625"`
+ // .
+ // The ID is assigned by the container runtime and can vary in different
+ // environments. Consider using `oci.manifest.digest` if it is important to
+ // identify the same image in different environments/runtimes.
+ //
+ // [API]: https://docs.docker.com/engine/api/v1.43/#tag/Container/operation/ContainerInspect
+ ContainerImageIDKey = attribute.Key("container.image.id")
+
+ // ContainerImageNameKey is the attribute Key conforming to the
+ // "container.image.name" semantic conventions. It represents the name of the
+ // image the container was built on.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gcr.io/opentelemetry/operator"
+ ContainerImageNameKey = attribute.Key("container.image.name")
+
+ // ContainerImageRepoDigestsKey is the attribute Key conforming to the
+ // "container.image.repo_digests" semantic conventions. It represents the repo
+ // digests of the container image as provided by the container runtime.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "example@sha256:afcc7f1ac1b49db317a7196c902e61c6c3c4607d63599ee1a82d702d249a0ccb",
+ // "internal.registry.example.com:5000/example@sha256:b69959407d21e8a062e0416bf13405bb2b71ed7a84dde4158ebafacfa06f5578"
+ // Note: [Docker] and [CRI] report those under the `RepoDigests` field.
+ //
+ // [Docker]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ // [CRI]: https://github.com/kubernetes/cri-api/blob/c75ef5b473bbe2d0a4fc92f82235efd665ea8e9f/pkg/apis/runtime/v1/api.proto#L1237-L1238
+ ContainerImageRepoDigestsKey = attribute.Key("container.image.repo_digests")
+
+ // ContainerImageTagsKey is the attribute Key conforming to the
+ // "container.image.tags" semantic conventions. It represents the container
+ // image tags. An example can be found in [Docker Image Inspect]. Should be only
+ // the `<tag>` section of the full name for example from
+ // `registry.example.com/my-org/my-image:<tag>`.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "v1.27.1", "3.5.7-0"
+ //
+ // [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+ ContainerImageTagsKey = attribute.Key("container.image.tags")
+
+ // ContainerNameKey is the attribute Key conforming to the "container.name"
+ // semantic conventions. It represents the container name used by container
+ // runtime.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-autoconf"
+ ContainerNameKey = attribute.Key("container.name")
+
+ // ContainerRuntimeDescriptionKey is the attribute Key conforming to the
+ // "container.runtime.description" semantic conventions. It represents a
+ // description about the runtime which could include, for example, details about
+ // the CRI/API version being used or other customisations.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker://19.3.1 - CRI: 1.22.0"
+ ContainerRuntimeDescriptionKey = attribute.Key("container.runtime.description")
+
+ // ContainerRuntimeNameKey is the attribute Key conforming to the
+ // "container.runtime.name" semantic conventions. It represents the container
+ // runtime managing this container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "docker", "containerd", "rkt"
+ ContainerRuntimeNameKey = attribute.Key("container.runtime.name")
+
+ // ContainerRuntimeVersionKey is the attribute Key conforming to the
+ // "container.runtime.version" semantic conventions. It represents the version
+ // of the runtime of this process, as returned by the runtime without
+ // modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0.0
+ ContainerRuntimeVersionKey = attribute.Key("container.runtime.version")
+)
+
+// ContainerCommand returns an attribute KeyValue conforming to the
+// "container.command" semantic conventions. It represents the command used to
+// run the container (i.e. the command name).
+func ContainerCommand(val string) attribute.KeyValue {
+ return ContainerCommandKey.String(val)
+}
+
+// ContainerCommandArgs returns an attribute KeyValue conforming to the
+// "container.command_args" semantic conventions. It represents the all the
+// command arguments (including the command/executable itself) run by the
+// container.
+func ContainerCommandArgs(val ...string) attribute.KeyValue {
+ return ContainerCommandArgsKey.StringSlice(val)
+}
+
+// ContainerCommandLine returns an attribute KeyValue conforming to the
+// "container.command_line" semantic conventions. It represents the full command
+// run by the container as a single string representing the full command.
+func ContainerCommandLine(val string) attribute.KeyValue {
+ return ContainerCommandLineKey.String(val)
+}
+
+// ContainerCSIPluginName returns an attribute KeyValue conforming to the
+// "container.csi.plugin.name" semantic conventions. It represents the name of
+// the CSI ([Container Storage Interface]) plugin used by the volume.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIPluginName(val string) attribute.KeyValue {
+ return ContainerCSIPluginNameKey.String(val)
+}
+
+// ContainerCSIVolumeID returns an attribute KeyValue conforming to the
+// "container.csi.volume.id" semantic conventions. It represents the unique
+// volume ID returned by the CSI ([Container Storage Interface]) plugin.
+//
+// [Container Storage Interface]: https://github.com/container-storage-interface/spec
+func ContainerCSIVolumeID(val string) attribute.KeyValue {
+ return ContainerCSIVolumeIDKey.String(val)
+}
+
+// ContainerID returns an attribute KeyValue conforming to the "container.id"
+// semantic conventions. It represents the container ID. Usually a UUID, as for
+// example used to [identify Docker containers]. The UUID might be abbreviated.
+//
+// [identify Docker containers]: https://docs.docker.com/engine/containers/run/#container-identification
+func ContainerID(val string) attribute.KeyValue {
+ return ContainerIDKey.String(val)
+}
+
+// ContainerImageID returns an attribute KeyValue conforming to the
+// "container.image.id" semantic conventions. It represents the runtime specific
+// image identifier. Usually a hash algorithm followed by a UUID.
+func ContainerImageID(val string) attribute.KeyValue {
+ return ContainerImageIDKey.String(val)
+}
+
+// ContainerImageName returns an attribute KeyValue conforming to the
+// "container.image.name" semantic conventions. It represents the name of the
+// image the container was built on.
+func ContainerImageName(val string) attribute.KeyValue {
+ return ContainerImageNameKey.String(val)
+}
+
+// ContainerImageRepoDigests returns an attribute KeyValue conforming to the
+// "container.image.repo_digests" semantic conventions. It represents the repo
+// digests of the container image as provided by the container runtime.
+func ContainerImageRepoDigests(val ...string) attribute.KeyValue {
+ return ContainerImageRepoDigestsKey.StringSlice(val)
+}
+
+// ContainerImageTags returns an attribute KeyValue conforming to the
+// "container.image.tags" semantic conventions. It represents the container image
+// tags. An example can be found in [Docker Image Inspect]. Should be only the
+// `<tag>` section of the full name for example from
+// `registry.example.com/my-org/my-image:<tag>`.
+//
+// [Docker Image Inspect]: https://docs.docker.com/engine/api/v1.43/#tag/Image/operation/ImageInspect
+func ContainerImageTags(val ...string) attribute.KeyValue {
+ return ContainerImageTagsKey.StringSlice(val)
+}
+
+// ContainerLabel returns an attribute KeyValue conforming to the
+// "container.label" semantic conventions. It represents the container labels,
+// `<key>` being the label name, the value being the label value.
+func ContainerLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("container.label."+key, val)
+}
+
+// ContainerName returns an attribute KeyValue conforming to the "container.name"
+// semantic conventions. It represents the container name used by container
+// runtime.
+func ContainerName(val string) attribute.KeyValue {
+ return ContainerNameKey.String(val)
+}
+
+// ContainerRuntimeDescription returns an attribute KeyValue conforming to the
+// "container.runtime.description" semantic conventions. It represents a
+// description about the runtime which could include, for example, details about
+// the CRI/API version being used or other customisations.
+func ContainerRuntimeDescription(val string) attribute.KeyValue {
+ return ContainerRuntimeDescriptionKey.String(val)
+}
+
+// ContainerRuntimeName returns an attribute KeyValue conforming to the
+// "container.runtime.name" semantic conventions. It represents the container
+// runtime managing this container.
+func ContainerRuntimeName(val string) attribute.KeyValue {
+ return ContainerRuntimeNameKey.String(val)
+}
+
+// ContainerRuntimeVersion returns an attribute KeyValue conforming to the
+// "container.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ContainerRuntimeVersion(val string) attribute.KeyValue {
+ return ContainerRuntimeVersionKey.String(val)
+}
+
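+// Illustrative sketch (not part of the upstream generated conventions):
+// container resource attributes, including the ContainerLabel template helper,
+// can be assembled like this. All values below are hypothetical.
+func exampleContainerAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ContainerID("a3bf90e006b2"),
+		ContainerImageName("gcr.io/opentelemetry/operator"),
+		ContainerImageTags("v1.27.1"),
+		// Template attribute: the label name becomes part of the key,
+		// producing "container.label.app" here.
+		ContainerLabel("app", "frontend"),
+	}
+}
+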
+// Namespace: cpu
+const (
+ // CPULogicalNumberKey is the attribute Key conforming to the
+ // "cpu.logical_number" semantic conventions. It represents the logical CPU
+ // number [0..n-1].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ CPULogicalNumberKey = attribute.Key("cpu.logical_number")
+
+ // CPUModeKey is the attribute Key conforming to the "cpu.mode" semantic
+ // conventions. It represents the mode of the CPU.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "user", "system"
+ CPUModeKey = attribute.Key("cpu.mode")
+)
+
+// CPULogicalNumber returns an attribute KeyValue conforming to the
+// "cpu.logical_number" semantic conventions. It represents the logical CPU
+// number [0..n-1].
+func CPULogicalNumber(val int) attribute.KeyValue {
+ return CPULogicalNumberKey.Int(val)
+}
+
+// Enum values for cpu.mode
+var (
+ // User
+ // Stability: development
+ CPUModeUser = CPUModeKey.String("user")
+ // System
+ // Stability: development
+ CPUModeSystem = CPUModeKey.String("system")
+ // Nice
+ // Stability: development
+ CPUModeNice = CPUModeKey.String("nice")
+ // Idle
+ // Stability: development
+ CPUModeIdle = CPUModeKey.String("idle")
+ // IO Wait
+ // Stability: development
+ CPUModeIOWait = CPUModeKey.String("iowait")
+ // Interrupt
+ // Stability: development
+ CPUModeInterrupt = CPUModeKey.String("interrupt")
+ // Steal
+ // Stability: development
+ CPUModeSteal = CPUModeKey.String("steal")
+ // Kernel
+ // Stability: development
+ CPUModeKernel = CPUModeKey.String("kernel")
+)
+
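+// Illustrative sketch (not part of the upstream generated conventions): the
+// cpu.mode enum values are ordinary attribute.KeyValue constants and can be
+// combined with the CPULogicalNumber helper; the CPU number below is a
+// hypothetical value.
+func exampleCPUAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		CPULogicalNumber(1),
+		CPUModeUser,
+	}
+}
+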
+// Namespace: db
+const (
+ // DBClientConnectionPoolNameKey is the attribute Key conforming to the
+ // "db.client.connection.pool.name" semantic conventions. It represents the name
+ // of the connection pool; unique within the instrumented application. In case
+ // the connection pool implementation doesn't provide a name, instrumentation
+ // SHOULD use a combination of parameters that would make the name unique, for
+ // example, combining attributes `server.address`, `server.port`, and
+ // `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+ // Instrumentations that generate connection pool name following different
+ // patterns SHOULD document it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myDataSource"
+ DBClientConnectionPoolNameKey = attribute.Key("db.client.connection.pool.name")
+
+ // DBClientConnectionStateKey is the attribute Key conforming to the
+ // "db.client.connection.state" semantic conventions. It represents the state of
+ // a connection in the pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "idle"
+ DBClientConnectionStateKey = attribute.Key("db.client.connection.state")
+
+ // DBCollectionNameKey is the attribute Key conforming to the
+ // "db.collection.name" semantic conventions. It represents the name of a
+ // collection (table, container) within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "public.users", "customers"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The collection name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple collections
+ // in non-batch operations.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // collection name then that collection name SHOULD be used.
+ DBCollectionNameKey = attribute.Key("db.collection.name")
+
+ // DBNamespaceKey is the attribute Key conforming to the "db.namespace" semantic
+ // conventions. It represents the name of the database, fully qualified within
+ // the server address and port.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "customers", "test.users"
+ // Note: If a database system has multiple namespace components, they SHOULD be
+ // concatenated from the most general to the most specific namespace component,
+ // using `|` as a separator between the components. Any missing components (and
+ // their associated separators) SHOULD be omitted.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.namespace` means in the context of that system.
+ // It is RECOMMENDED to capture the value as provided by the application without
+ // attempting to do any case normalization.
+ DBNamespaceKey = attribute.Key("db.namespace")
+
+ // DBOperationBatchSizeKey is the attribute Key conforming to the
+ // "db.operation.batch.size" semantic conventions. It represents the number of
+ // queries included in a batch operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 2, 3, 4
+ // Note: Operations are only considered batches when they contain two or more
+ // operations, and so `db.operation.batch.size` SHOULD never be `1`.
+ DBOperationBatchSizeKey = attribute.Key("db.operation.batch.size")
+
+ // DBOperationNameKey is the attribute Key conforming to the "db.operation.name"
+ // semantic conventions. It represents the name of the operation or command
+ // being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "findAndModify", "HMSET", "SELECT"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // The operation name SHOULD NOT be extracted from `db.query.text`,
+ // when the database system supports query text with multiple operations
+ // in non-batch operations.
+ //
+ // If spaces can occur in the operation name, multiple consecutive spaces
+ // SHOULD be normalized to a single space.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // operation name
+ // then that operation name SHOULD be used prepended by `BATCH `,
+ // otherwise `db.operation.name` SHOULD be `BATCH` or some other database
+ // system specific term if more applicable.
+ DBOperationNameKey = attribute.Key("db.operation.name")
+
+ // DBQuerySummaryKey is the attribute Key conforming to the "db.query.summary"
+ // semantic conventions. It represents the low cardinality summary of a database
+ // query.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT wuser_table", "INSERT shipping_details SELECT orders", "get
+ // user by id"
+ // Note: The query summary describes a class of database queries and is useful
+ // as a grouping key, especially when analyzing telemetry for database
+ // calls involving complex queries.
+ //
+ // Summary may be available to the instrumentation through
+ // instrumentation hooks or other means. If it is not available,
+ // instrumentations
+ // that support query parsing SHOULD generate a summary following
+ // [Generating query summary]
+ // section.
+ //
+ // [Generating query summary]: /docs/database/database-spans.md#generating-a-summary-of-the-query
+ DBQuerySummaryKey = attribute.Key("db.query.summary")
+
+ // DBQueryTextKey is the attribute Key conforming to the "db.query.text"
+ // semantic conventions. It represents the database query being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SELECT * FROM wuser_table where username = ?", "SET mykey ?"
+ // Note: For sanitization see [Sanitization of `db.query.text`].
+ // For batch operations, if the individual operations are known to have the same
+ // query text then that query text SHOULD be used, otherwise all of the
+ // individual query texts SHOULD be concatenated with separator `; ` or some
+ // other database system specific separator if more applicable.
+ // Parameterized query text SHOULD NOT be sanitized. Even though parameterized
+ // query text can potentially have sensitive data, by using a parameterized
+ // query the user is giving a strong signal that any sensitive data will be
+ // passed as parameter values, and the benefit to observability of capturing the
+ // static part of the query text by default outweighs the risk.
+ //
+ // [Sanitization of `db.query.text`]: /docs/database/database-spans.md#sanitization-of-dbquerytext
+ DBQueryTextKey = attribute.Key("db.query.text")
+
+ // DBResponseReturnedRowsKey is the attribute Key conforming to the
+ // "db.response.returned_rows" semantic conventions. It represents the number of
+ // rows returned by the operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 10, 30, 1000
+ DBResponseReturnedRowsKey = attribute.Key("db.response.returned_rows")
+
+ // DBResponseStatusCodeKey is the attribute Key conforming to the
+ // "db.response.status_code" semantic conventions. It represents the database
+ // response status code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "102", "ORA-17002", "08P01", "404"
+ // Note: The status code returned by the database. Usually it represents an
+ // error code, but may also represent partial success, warning, or differentiate
+ // between various types of successful outcomes.
+ // Semantic conventions for individual database systems SHOULD document what
+ // `db.response.status_code` means in the context of that system.
+ DBResponseStatusCodeKey = attribute.Key("db.response.status_code")
+
+ // DBStoredProcedureNameKey is the attribute Key conforming to the
+ // "db.stored_procedure.name" semantic conventions. It represents the name of a
+ // stored procedure within the database.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GetCustomer"
+ // Note: It is RECOMMENDED to capture the value as provided by the application
+ // without attempting to do any case normalization.
+ //
+ // For batch operations, if the individual operations are known to have the same
+ // stored procedure name then that stored procedure name SHOULD be used.
+ DBStoredProcedureNameKey = attribute.Key("db.stored_procedure.name")
+
+ // DBSystemNameKey is the attribute Key conforming to the "db.system.name"
+ // semantic conventions. It represents the database management system (DBMS)
+ // product as identified by the client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ // Note: The actual DBMS may differ from the one identified by the client. For
+ // example, when using PostgreSQL client libraries to connect to a CockroachDB,
+ // the `db.system.name` is set to `postgresql` based on the instrumentation's
+ // best knowledge.
+ DBSystemNameKey = attribute.Key("db.system.name")
+)
+
+// DBClientConnectionPoolName returns an attribute KeyValue conforming to the
+// "db.client.connection.pool.name" semantic conventions. It represents the name
+// of the connection pool; unique within the instrumented application. In case
+// the connection pool implementation doesn't provide a name, instrumentation
+// SHOULD use a combination of parameters that would make the name unique, for
+// example, combining attributes `server.address`, `server.port`, and
+// `db.namespace`, formatted as `server.address:server.port/db.namespace`.
+// Instrumentations that generate connection pool name following different
+// patterns SHOULD document it.
+func DBClientConnectionPoolName(val string) attribute.KeyValue {
+ return DBClientConnectionPoolNameKey.String(val)
+}
+
+// DBCollectionName returns an attribute KeyValue conforming to the
+// "db.collection.name" semantic conventions. It represents the name of a
+// collection (table, container) within the database.
+func DBCollectionName(val string) attribute.KeyValue {
+ return DBCollectionNameKey.String(val)
+}
+
+// DBNamespace returns an attribute KeyValue conforming to the "db.namespace"
+// semantic conventions. It represents the name of the database, fully qualified
+// within the server address and port.
+func DBNamespace(val string) attribute.KeyValue {
+ return DBNamespaceKey.String(val)
+}
+
+// DBOperationBatchSize returns an attribute KeyValue conforming to the
+// "db.operation.batch.size" semantic conventions. It represents the number of
+// queries included in a batch operation.
+func DBOperationBatchSize(val int) attribute.KeyValue {
+ return DBOperationBatchSizeKey.Int(val)
+}
+
+// DBOperationName returns an attribute KeyValue conforming to the
+// "db.operation.name" semantic conventions. It represents the name of the
+// operation or command being executed.
+func DBOperationName(val string) attribute.KeyValue {
+ return DBOperationNameKey.String(val)
+}
+
+// DBOperationParameter returns an attribute KeyValue conforming to the
+// "db.operation.parameter" semantic conventions. It represents a database
+// operation parameter, with `<key>` being the parameter name, and the attribute
+// value being a string representation of the parameter value.
+func DBOperationParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.operation.parameter."+key, val)
+}
+
+// DBQueryParameter returns an attribute KeyValue conforming to the
+// "db.query.parameter" semantic conventions. It represents a database query
+// parameter, with `<key>` being the parameter name, and the attribute value
+// being a string representation of the parameter value.
+func DBQueryParameter(key string, val string) attribute.KeyValue {
+ return attribute.String("db.query.parameter."+key, val)
+}
+
+// DBQuerySummary returns an attribute KeyValue conforming to the
+// "db.query.summary" semantic conventions. It represents the low cardinality
+// summary of a database query.
+func DBQuerySummary(val string) attribute.KeyValue {
+ return DBQuerySummaryKey.String(val)
+}
+
+// DBQueryText returns an attribute KeyValue conforming to the "db.query.text"
+// semantic conventions. It represents the database query being executed.
+func DBQueryText(val string) attribute.KeyValue {
+ return DBQueryTextKey.String(val)
+}
+
+// DBResponseReturnedRows returns an attribute KeyValue conforming to the
+// "db.response.returned_rows" semantic conventions. It represents the number of
+// rows returned by the operation.
+func DBResponseReturnedRows(val int) attribute.KeyValue {
+ return DBResponseReturnedRowsKey.Int(val)
+}
+
+// DBResponseStatusCode returns an attribute KeyValue conforming to the
+// "db.response.status_code" semantic conventions. It represents the database
+// response status code.
+func DBResponseStatusCode(val string) attribute.KeyValue {
+ return DBResponseStatusCodeKey.String(val)
+}
+
+// DBStoredProcedureName returns an attribute KeyValue conforming to the
+// "db.stored_procedure.name" semantic conventions. It represents the name of a
+// stored procedure within the database.
+func DBStoredProcedureName(val string) attribute.KeyValue {
+ return DBStoredProcedureNameKey.String(val)
+}
+
+// Enum values for db.client.connection.state
+var (
+ // idle
+ // Stability: development
+ DBClientConnectionStateIdle = DBClientConnectionStateKey.String("idle")
+ // used
+ // Stability: development
+ DBClientConnectionStateUsed = DBClientConnectionStateKey.String("used")
+)
+
+// Enum values for db.system.name
+var (
+ // Some other SQL database. Fallback only.
+ // Stability: development
+ DBSystemNameOtherSQL = DBSystemNameKey.String("other_sql")
+ // [Adabas (Adaptable Database System)]
+ // Stability: development
+ //
+ // [Adabas (Adaptable Database System)]: https://documentation.softwareag.com/?pf=adabas
+ DBSystemNameSoftwareagAdabas = DBSystemNameKey.String("softwareag.adabas")
+ // [Actian Ingres]
+ // Stability: development
+ //
+ // [Actian Ingres]: https://www.actian.com/databases/ingres/
+ DBSystemNameActianIngres = DBSystemNameKey.String("actian.ingres")
+ // [Amazon DynamoDB]
+ // Stability: development
+ //
+ // [Amazon DynamoDB]: https://aws.amazon.com/pm/dynamodb/
+ DBSystemNameAWSDynamoDB = DBSystemNameKey.String("aws.dynamodb")
+ // [Amazon Redshift]
+ // Stability: development
+ //
+ // [Amazon Redshift]: https://aws.amazon.com/redshift/
+ DBSystemNameAWSRedshift = DBSystemNameKey.String("aws.redshift")
+ // [Azure Cosmos DB]
+ // Stability: development
+ //
+ // [Azure Cosmos DB]: https://learn.microsoft.com/azure/cosmos-db
+ DBSystemNameAzureCosmosDB = DBSystemNameKey.String("azure.cosmosdb")
+ // [InterSystems Caché]
+ // Stability: development
+ //
+ // [InterSystems Caché]: https://www.intersystems.com/products/cache/
+ DBSystemNameIntersystemsCache = DBSystemNameKey.String("intersystems.cache")
+ // [Apache Cassandra]
+ // Stability: development
+ //
+ // [Apache Cassandra]: https://cassandra.apache.org/
+ DBSystemNameCassandra = DBSystemNameKey.String("cassandra")
+ // [ClickHouse]
+ // Stability: development
+ //
+ // [ClickHouse]: https://clickhouse.com/
+ DBSystemNameClickHouse = DBSystemNameKey.String("clickhouse")
+ // [CockroachDB]
+ // Stability: development
+ //
+ // [CockroachDB]: https://www.cockroachlabs.com/
+ DBSystemNameCockroachDB = DBSystemNameKey.String("cockroachdb")
+ // [Couchbase]
+ // Stability: development
+ //
+ // [Couchbase]: https://www.couchbase.com/
+ DBSystemNameCouchbase = DBSystemNameKey.String("couchbase")
+ // [Apache CouchDB]
+ // Stability: development
+ //
+ // [Apache CouchDB]: https://couchdb.apache.org/
+ DBSystemNameCouchDB = DBSystemNameKey.String("couchdb")
+ // [Apache Derby]
+ // Stability: development
+ //
+ // [Apache Derby]: https://db.apache.org/derby/
+ DBSystemNameDerby = DBSystemNameKey.String("derby")
+ // [Elasticsearch]
+ // Stability: development
+ //
+ // [Elasticsearch]: https://www.elastic.co/elasticsearch
+ DBSystemNameElasticsearch = DBSystemNameKey.String("elasticsearch")
+ // [Firebird]
+ // Stability: development
+ //
+ // [Firebird]: https://www.firebirdsql.org/
+ DBSystemNameFirebirdSQL = DBSystemNameKey.String("firebirdsql")
+ // [Google Cloud Spanner]
+ // Stability: development
+ //
+ // [Google Cloud Spanner]: https://cloud.google.com/spanner
+ DBSystemNameGCPSpanner = DBSystemNameKey.String("gcp.spanner")
+ // [Apache Geode]
+ // Stability: development
+ //
+ // [Apache Geode]: https://geode.apache.org/
+ DBSystemNameGeode = DBSystemNameKey.String("geode")
+ // [H2 Database]
+ // Stability: development
+ //
+ // [H2 Database]: https://h2database.com/
+ DBSystemNameH2database = DBSystemNameKey.String("h2database")
+ // [Apache HBase]
+ // Stability: development
+ //
+ // [Apache HBase]: https://hbase.apache.org/
+ DBSystemNameHBase = DBSystemNameKey.String("hbase")
+ // [Apache Hive]
+ // Stability: development
+ //
+ // [Apache Hive]: https://hive.apache.org/
+ DBSystemNameHive = DBSystemNameKey.String("hive")
+ // [HyperSQL Database]
+ // Stability: development
+ //
+ // [HyperSQL Database]: https://hsqldb.org/
+ DBSystemNameHSQLDB = DBSystemNameKey.String("hsqldb")
+ // [IBM Db2]
+ // Stability: development
+ //
+ // [IBM Db2]: https://www.ibm.com/db2
+ DBSystemNameIBMDB2 = DBSystemNameKey.String("ibm.db2")
+ // [IBM Informix]
+ // Stability: development
+ //
+ // [IBM Informix]: https://www.ibm.com/products/informix
+ DBSystemNameIBMInformix = DBSystemNameKey.String("ibm.informix")
+ // [IBM Netezza]
+ // Stability: development
+ //
+ // [IBM Netezza]: https://www.ibm.com/products/netezza
+ DBSystemNameIBMNetezza = DBSystemNameKey.String("ibm.netezza")
+ // [InfluxDB]
+ // Stability: development
+ //
+ // [InfluxDB]: https://www.influxdata.com/
+ DBSystemNameInfluxDB = DBSystemNameKey.String("influxdb")
+ // [Instant]
+ // Stability: development
+ //
+ // [Instant]: https://www.instantdb.com/
+ DBSystemNameInstantDB = DBSystemNameKey.String("instantdb")
+ // [MariaDB]
+ // Stability: stable
+ //
+ // [MariaDB]: https://mariadb.org/
+ DBSystemNameMariaDB = DBSystemNameKey.String("mariadb")
+ // [Memcached]
+ // Stability: development
+ //
+ // [Memcached]: https://memcached.org/
+ DBSystemNameMemcached = DBSystemNameKey.String("memcached")
+ // [MongoDB]
+ // Stability: development
+ //
+ // [MongoDB]: https://www.mongodb.com/
+ DBSystemNameMongoDB = DBSystemNameKey.String("mongodb")
+ // [Microsoft SQL Server]
+ // Stability: stable
+ //
+ // [Microsoft SQL Server]: https://www.microsoft.com/sql-server
+ DBSystemNameMicrosoftSQLServer = DBSystemNameKey.String("microsoft.sql_server")
+ // [MySQL]
+ // Stability: stable
+ //
+ // [MySQL]: https://www.mysql.com/
+ DBSystemNameMySQL = DBSystemNameKey.String("mysql")
+ // [Neo4j]
+ // Stability: development
+ //
+ // [Neo4j]: https://neo4j.com/
+ DBSystemNameNeo4j = DBSystemNameKey.String("neo4j")
+ // [OpenSearch]
+ // Stability: development
+ //
+ // [OpenSearch]: https://opensearch.org/
+ DBSystemNameOpenSearch = DBSystemNameKey.String("opensearch")
+ // [Oracle Database]
+ // Stability: development
+ //
+ // [Oracle Database]: https://www.oracle.com/database/
+ DBSystemNameOracleDB = DBSystemNameKey.String("oracle.db")
+ // [PostgreSQL]
+ // Stability: stable
+ //
+ // [PostgreSQL]: https://www.postgresql.org/
+ DBSystemNamePostgreSQL = DBSystemNameKey.String("postgresql")
+ // [Redis]
+ // Stability: development
+ //
+ // [Redis]: https://redis.io/
+ DBSystemNameRedis = DBSystemNameKey.String("redis")
+ // [SAP HANA]
+ // Stability: development
+ //
+ // [SAP HANA]: https://www.sap.com/products/technology-platform/hana/what-is-sap-hana.html
+ DBSystemNameSAPHANA = DBSystemNameKey.String("sap.hana")
+ // [SAP MaxDB]
+ // Stability: development
+ //
+ // [SAP MaxDB]: https://maxdb.sap.com/
+ DBSystemNameSAPMaxDB = DBSystemNameKey.String("sap.maxdb")
+ // [SQLite]
+ // Stability: development
+ //
+ // [SQLite]: https://www.sqlite.org/
+ DBSystemNameSQLite = DBSystemNameKey.String("sqlite")
+ // [Teradata]
+ // Stability: development
+ //
+ // [Teradata]: https://www.teradata.com/
+ DBSystemNameTeradata = DBSystemNameKey.String("teradata")
+ // [Trino]
+ // Stability: development
+ //
+ // [Trino]: https://trino.io/
+ DBSystemNameTrino = DBSystemNameKey.String("trino")
+)
+
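+// Illustrative sketch (not part of the upstream generated conventions): a
+// database client span might carry the stable db.* attributes like this. The
+// namespace, collection, and query values are hypothetical; per the notes
+// above, db.query.summary is the low-cardinality companion of db.query.text.
+func exampleDBClientAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DBSystemNamePostgreSQL,
+		DBNamespace("customers"),
+		DBCollectionName("public.users"),
+		DBOperationName("SELECT"),
+		DBQueryText("SELECT * FROM public.users WHERE id = $1"),
+		DBQuerySummary("SELECT public.users"),
+	}
+}
+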
+// Namespace: deployment
+const (
+ // DeploymentEnvironmentNameKey is the attribute Key conforming to the
+ // "deployment.environment.name" semantic conventions. It represents the name of
+ // the [deployment environment] (aka deployment tier).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "staging", "production"
+ // Note: `deployment.environment.name` does not affect the uniqueness
+ // constraints defined through
+ // the `service.namespace`, `service.name` and `service.instance.id` resource
+ // attributes.
+ // This implies that resources carrying the following attribute combinations
+ // MUST be
+ // considered to be identifying the same service:
+ //
+ // - `service.name=frontend`, `deployment.environment.name=production`
+ // - `service.name=frontend`, `deployment.environment.name=staging`.
+ //
+ //
+ // [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+ DeploymentEnvironmentNameKey = attribute.Key("deployment.environment.name")
+
+ // DeploymentIDKey is the attribute Key conforming to the "deployment.id"
+ // semantic conventions. It represents the id of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1208"
+ DeploymentIDKey = attribute.Key("deployment.id")
+
+ // DeploymentNameKey is the attribute Key conforming to the "deployment.name"
+ // semantic conventions. It represents the name of the deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "deploy my app", "deploy-frontend"
+ DeploymentNameKey = attribute.Key("deployment.name")
+
+ // DeploymentStatusKey is the attribute Key conforming to the
+ // "deployment.status" semantic conventions. It represents the status of the
+ // deployment.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ DeploymentStatusKey = attribute.Key("deployment.status")
+)
+
+// DeploymentEnvironmentName returns an attribute KeyValue conforming to the
+// "deployment.environment.name" semantic conventions. It represents the name of
+// the [deployment environment] (aka deployment tier).
+//
+// [deployment environment]: https://wikipedia.org/wiki/Deployment_environment
+func DeploymentEnvironmentName(val string) attribute.KeyValue {
+ return DeploymentEnvironmentNameKey.String(val)
+}
+
+// DeploymentID returns an attribute KeyValue conforming to the "deployment.id"
+// semantic conventions. It represents the id of the deployment.
+func DeploymentID(val string) attribute.KeyValue {
+ return DeploymentIDKey.String(val)
+}
+
+// DeploymentName returns an attribute KeyValue conforming to the
+// "deployment.name" semantic conventions. It represents the name of the
+// deployment.
+func DeploymentName(val string) attribute.KeyValue {
+ return DeploymentNameKey.String(val)
+}
+
+// Enum values for deployment.status
+var (
+ // failed
+ // Stability: development
+ DeploymentStatusFailed = DeploymentStatusKey.String("failed")
+ // succeeded
+ // Stability: development
+ DeploymentStatusSucceeded = DeploymentStatusKey.String("succeeded")
+)
+
+// Namespace: destination
+const (
+ // DestinationAddressKey is the attribute Key conforming to the
+ // "destination.address" semantic conventions. It represents the destination
+ // address - domain name if available without reverse DNS lookup; otherwise, IP
+ // address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "destination.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the source side, and when communicating through an
+ // intermediary, `destination.address` SHOULD represent the destination address
+ // behind any intermediaries, for example proxies, if it's available.
+ DestinationAddressKey = attribute.Key("destination.address")
+
+ // DestinationPortKey is the attribute Key conforming to the "destination.port"
+ // semantic conventions. It represents the destination port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ DestinationPortKey = attribute.Key("destination.port")
+)
+
+// DestinationAddress returns an attribute KeyValue conforming to the
+// "destination.address" semantic conventions. It represents the destination
+// address - domain name if available without reverse DNS lookup; otherwise, IP
+// address or Unix domain socket name.
+func DestinationAddress(val string) attribute.KeyValue {
+ return DestinationAddressKey.String(val)
+}
+
+// DestinationPort returns an attribute KeyValue conforming to the
+// "destination.port" semantic conventions. It represents the destination port
+// number.
+func DestinationPort(val int) attribute.KeyValue {
+ return DestinationPortKey.Int(val)
+}
+
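+// Illustrative sketch (not part of the upstream generated conventions):
+// destination attributes pair the peer name (or address) with the port; the
+// values below are hypothetical.
+func exampleDestinationAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		DestinationAddress("destination.example.com"),
+		DestinationPort(3389),
+	}
+}
+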
+// Namespace: device
+const (
+ // DeviceIDKey is the attribute Key conforming to the "device.id" semantic
+ // conventions. It represents a unique identifier representing the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123456789012345", "01:23:45:67:89:AB"
+ // Note: Its value SHOULD be identical for all apps on a device and it SHOULD
+ // NOT change if an app is uninstalled and re-installed.
+ // However, it might be resettable by the user for all apps on a device.
+ // Hardware IDs (e.g. vendor-specific serial number, IMEI or MAC address) MAY be
+ // used as values.
+ //
+ // More information about Android identifier best practices can be found in the
+ // [Android user data IDs guide].
+ //
+ // > [!WARNING]
+ // > This attribute may contain sensitive (PII) information. Caution should be
+ // > taken when storing personal data or anything which can identify a user.
+ // > GDPR and data protection laws may apply, ensure you do your own due
+ // > diligence.
+ // >
+ // > Due to these reasons, this identifier is not recommended for consumer
+ // > applications and will likely result in rejection from both Google Play and
+ // > App Store. However, it may be appropriate for specific enterprise
+ // > scenarios, such as kiosk devices or enterprise-managed devices, with
+ // > appropriate compliance clearance.
+ // > Any instrumentation providing this identifier MUST implement it as an
+ // > opt-in feature.
+ // >
+ // > See [`app.installation.id`] for a more privacy-preserving alternative.
+ //
+ // [Android user data IDs guide]: https://developer.android.com/training/articles/user-data-ids
+ // [`app.installation.id`]: /docs/registry/attributes/app.md#app-installation-id
+ DeviceIDKey = attribute.Key("device.id")
+
+ // DeviceManufacturerKey is the attribute Key conforming to the
+ // "device.manufacturer" semantic conventions. It represents the name of the
+ // device manufacturer.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apple", "Samsung"
+ // Note: The Android OS provides this field via [Build]. iOS apps SHOULD
+ // hardcode the value `Apple`.
+ //
+ // [Build]: https://developer.android.com/reference/android/os/Build#MANUFACTURER
+ DeviceManufacturerKey = attribute.Key("device.manufacturer")
+
+ // DeviceModelIdentifierKey is the attribute Key conforming to the
+ // "device.model.identifier" semantic conventions. It represents the model
+ // identifier for the device.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone3,4", "SM-G920F"
+ // Note: It's recommended this value represents a machine-readable version of
+ // the model identifier rather than the market or consumer-friendly name of the
+ // device.
+ DeviceModelIdentifierKey = attribute.Key("device.model.identifier")
+
+ // DeviceModelNameKey is the attribute Key conforming to the "device.model.name"
+ // semantic conventions. It represents the marketing name for the device model.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iPhone 6s Plus", "Samsung Galaxy S6"
+ // Note: It's recommended this value represents a human-readable version of the
+ // device model rather than a machine-readable alternative.
+ DeviceModelNameKey = attribute.Key("device.model.name")
+)
+
+// DeviceID returns an attribute KeyValue conforming to the "device.id" semantic
+// conventions. It represents a unique identifier representing the device.
+func DeviceID(val string) attribute.KeyValue {
+ return DeviceIDKey.String(val)
+}
+
+// DeviceManufacturer returns an attribute KeyValue conforming to the
+// "device.manufacturer" semantic conventions. It represents the name of the
+// device manufacturer.
+func DeviceManufacturer(val string) attribute.KeyValue {
+ return DeviceManufacturerKey.String(val)
+}
+
+// DeviceModelIdentifier returns an attribute KeyValue conforming to the
+// "device.model.identifier" semantic conventions. It represents the model
+// identifier for the device.
+func DeviceModelIdentifier(val string) attribute.KeyValue {
+ return DeviceModelIdentifierKey.String(val)
+}
+
+// DeviceModelName returns an attribute KeyValue conforming to the
+// "device.model.name" semantic conventions. It represents the marketing name for
+// the device model.
+func DeviceModelName(val string) attribute.KeyValue {
+ return DeviceModelNameKey.String(val)
+}
+
+// Namespace: disk
+const (
+ // DiskIODirectionKey is the attribute Key conforming to the "disk.io.direction"
+ // semantic conventions. It represents the disk IO operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "read"
+ DiskIODirectionKey = attribute.Key("disk.io.direction")
+)
+
+// Enum values for disk.io.direction
+var (
+ // read
+ // Stability: development
+ DiskIODirectionRead = DiskIODirectionKey.String("read")
+ // write
+ // Stability: development
+ DiskIODirectionWrite = DiskIODirectionKey.String("write")
+)
+
+// Namespace: dns
+const (
+ // DNSAnswersKey is the attribute Key conforming to the "dns.answers" semantic
+ // conventions. It represents the list of IPv4 or IPv6 addresses resolved during
+ // DNS lookup.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.0.0.1", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"
+ DNSAnswersKey = attribute.Key("dns.answers")
+
+ // DNSQuestionNameKey is the attribute Key conforming to the "dns.question.name"
+ // semantic conventions. It represents the name being queried.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.example.com", "opentelemetry.io"
+ // Note: If the name field contains non-printable characters (below 32 or above
+ // 126), those characters should be represented as escaped base 10 integers
+ // (\DDD). Back slashes and quotes should be escaped. Tabs, carriage returns,
+ // and line feeds should be converted to \t, \r, and \n respectively.
+ DNSQuestionNameKey = attribute.Key("dns.question.name")
+)
+
+// DNSAnswers returns an attribute KeyValue conforming to the "dns.answers"
+// semantic conventions. It represents the list of IPv4 or IPv6 addresses
+// resolved during DNS lookup.
+func DNSAnswers(val ...string) attribute.KeyValue {
+ return DNSAnswersKey.StringSlice(val)
+}
+
+// DNSQuestionName returns an attribute KeyValue conforming to the
+// "dns.question.name" semantic conventions. It represents the name being
+// queried.
+func DNSQuestionName(val string) attribute.KeyValue {
+ return DNSQuestionNameKey.String(val)
+}
+
+// Namespace: elasticsearch
+const (
+ // ElasticsearchNodeNameKey is the attribute Key conforming to the
+ // "elasticsearch.node.name" semantic conventions. It represents the
+ // human-readable identifier of the node/instance to which a request was
+ // routed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-0000000001"
+ ElasticsearchNodeNameKey = attribute.Key("elasticsearch.node.name")
+)
+
+// ElasticsearchNodeName returns an attribute KeyValue conforming to the
+// "elasticsearch.node.name" semantic conventions. It represents the represents
+// the human-readable identifier of the node/instance to which a request was
+// routed.
+func ElasticsearchNodeName(val string) attribute.KeyValue {
+ return ElasticsearchNodeNameKey.String(val)
+}
+
+// Namespace: enduser
+const (
+ // EnduserIDKey is the attribute Key conforming to the "enduser.id" semantic
+ // conventions. It represents the unique identifier of an end user in the
+ // system. It may be a username, email address, or other identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "username"
+ // Note: Unique identifier of an end user in the system.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (PII) information.
+ EnduserIDKey = attribute.Key("enduser.id")
+
+ // EnduserPseudoIDKey is the attribute Key conforming to the "enduser.pseudo.id"
+ // semantic conventions. It represents the pseudonymous identifier of an end
+ // user. This identifier should be a random value that is not directly linked or
+ // associated with the end user's actual identity.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "QdH5CAWJgqVT4rOr0qtumf"
+ // Note: Pseudonymous identifier of an end user.
+ //
+ // > [!Warning]
+ // > This field contains sensitive (linkable PII) information.
+ EnduserPseudoIDKey = attribute.Key("enduser.pseudo.id")
+)
+
+// EnduserID returns an attribute KeyValue conforming to the "enduser.id"
+// semantic conventions. It represents the unique identifier of an end user in
+// the system. It may be a username, email address, or other identifier.
+func EnduserID(val string) attribute.KeyValue {
+ return EnduserIDKey.String(val)
+}
+
+// EnduserPseudoID returns an attribute KeyValue conforming to the
+// "enduser.pseudo.id" semantic conventions. It represents the pseudonymous
+// identifier of an end user. This identifier should be a random value that is
+// not directly linked or associated with the end user's actual identity.
+func EnduserPseudoID(val string) attribute.KeyValue {
+ return EnduserPseudoIDKey.String(val)
+}
+
+// Namespace: error
+const (
+ // ErrorMessageKey is the attribute Key conforming to the "error.message"
+ // semantic conventions. It represents a message providing more detail about an
+ // error in human-readable form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Unexpected input type: string", "The user has exceeded their
+ // storage quota"
+ // Note: `error.message` should provide additional context and detail about an
+ // error.
+ // It is NOT RECOMMENDED to duplicate the value of `error.type` in
+ // `error.message`.
+ // It is also NOT RECOMMENDED to duplicate the value of `exception.message` in
+ // `error.message`.
+ //
+ // `error.message` is NOT RECOMMENDED for metrics or spans due to its unbounded
+ // cardinality and overlap with span status.
+ ErrorMessageKey = attribute.Key("error.message")
+
+ // ErrorTypeKey is the attribute Key conforming to the "error.type" semantic
+ // conventions. It describes a class of error the operation ended
+ // with.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "timeout", "java.net.UnknownHostException",
+ // "server_certificate_invalid", "500"
+ // Note: The `error.type` SHOULD be predictable, and SHOULD have low
+ // cardinality.
+ //
+ // When `error.type` is set to a type (e.g., an exception type), its
+ // canonical class name identifying the type within the artifact SHOULD be used.
+ //
+ // Instrumentations SHOULD document the list of errors they report.
+ //
+ // The cardinality of `error.type` within one instrumentation library SHOULD be
+ // low.
+ // Telemetry consumers that aggregate data from multiple instrumentation
+ // libraries and applications
+ // should be prepared for `error.type` to have high cardinality at query time
+ // when no
+ // additional filters are applied.
+ //
+ // If the operation has completed successfully, instrumentations SHOULD NOT set
+ // `error.type`.
+ //
+ // If a specific domain defines its own set of error identifiers (such as HTTP
+ // or gRPC status codes),
+ // it's RECOMMENDED to:
+ //
+ // - Use a domain-specific attribute
+ // - Set `error.type` to capture all errors, regardless of whether they are
+ // defined within the domain-specific set or not.
+ ErrorTypeKey = attribute.Key("error.type")
+)
+
+// ErrorMessage returns an attribute KeyValue conforming to the "error.message"
+// semantic conventions. It represents a message providing more detail about an
+// error in human-readable form.
+func ErrorMessage(val string) attribute.KeyValue {
+ return ErrorMessageKey.String(val)
+}
+
+// Enum values for error.type
+var (
+ // A fallback error value to be used when the instrumentation doesn't define a
+ // custom value.
+ //
+ // Stability: stable
+ ErrorTypeOther = ErrorTypeKey.String("_OTHER")
+)
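+
+// Usage sketch (illustrative, not part of the generated conventions): an
+// operation that failed could record the error class and, where useful, a
+// human-readable message; `span` is assumed to be a
+// go.opentelemetry.io/otel/trace.Span obtained by the caller, and
+// ErrorTypeOther is the documented fallback when no better value applies.
+//
+//	span.SetAttributes(
+//		ErrorTypeKey.String("java.net.UnknownHostException"),
+//		ErrorMessage("host not found"),
+//	)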
+
+// Namespace: exception
+const (
+ // ExceptionMessageKey is the attribute Key conforming to the
+ // "exception.message" semantic conventions. It represents the exception
+ // message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "Division by zero", "Can't convert 'int' object to str implicitly"
+ ExceptionMessageKey = attribute.Key("exception.message")
+
+ // ExceptionStacktraceKey is the attribute Key conforming to the
+ // "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+ // string in the natural representation for the language runtime. The
+ // representation is to be determined and documented by each language SIG.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: Exception in thread "main" java.lang.RuntimeException: Test
+ // exception\n at com.example.GenerateTrace.methodB(GenerateTrace.java:13)\n at
+ // com.example.GenerateTrace.methodA(GenerateTrace.java:9)\n at
+ // com.example.GenerateTrace.main(GenerateTrace.java:5)
+ ExceptionStacktraceKey = attribute.Key("exception.stacktrace")
+
+ // ExceptionTypeKey is the attribute Key conforming to the "exception.type"
+ // semantic conventions. It represents the type of the exception (its
+ // fully-qualified class name, if applicable). The dynamic type of the exception
+ // should be preferred over the static type in languages that support it.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "java.net.ConnectException", "OSError"
+ ExceptionTypeKey = attribute.Key("exception.type")
+)
+
+// ExceptionMessage returns an attribute KeyValue conforming to the
+// "exception.message" semantic conventions. It represents the exception message.
+func ExceptionMessage(val string) attribute.KeyValue {
+ return ExceptionMessageKey.String(val)
+}
+
+// ExceptionStacktrace returns an attribute KeyValue conforming to the
+// "exception.stacktrace" semantic conventions. It represents a stacktrace as a
+// string in the natural representation for the language runtime. The
+// representation is to be determined and documented by each language SIG.
+func ExceptionStacktrace(val string) attribute.KeyValue {
+ return ExceptionStacktraceKey.String(val)
+}
+
+// ExceptionType returns an attribute KeyValue conforming to the "exception.type"
+// semantic conventions. It represents the type of the exception (its
+// fully-qualified class name, if applicable). The dynamic type of the exception
+// should be preferred over the static type in languages that support it.
+func ExceptionType(val string) attribute.KeyValue {
+ return ExceptionTypeKey.String(val)
+}
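+
+// Usage sketch (illustrative, not part of the generated conventions): a caught
+// Go error could be recorded with these attributes; `span` is assumed to be a
+// go.opentelemetry.io/otel/trace.Span and `err` an error available to the
+// caller.
+//
+//	span.SetAttributes(
+//		ExceptionType(fmt.Sprintf("%T", err)),
+//		ExceptionMessage(err.Error()),
+//	)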
+
+// Namespace: faas
+const (
+ // FaaSColdstartKey is the attribute Key conforming to the "faas.coldstart"
+ // semantic conventions. It represents a boolean that is true if the serverless
+ // function is executed for the first time (aka cold-start).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSColdstartKey = attribute.Key("faas.coldstart")
+
+ // FaaSCronKey is the attribute Key conforming to the "faas.cron" semantic
+ // conventions. It represents a string containing the schedule period as
+ // [Cron Expression].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0/5 * * * ? *
+ //
+ // [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+ FaaSCronKey = attribute.Key("faas.cron")
+
+ // FaaSDocumentCollectionKey is the attribute Key conforming to the
+ // "faas.document.collection" semantic conventions. It represents the name of
+ // the source on which the triggering operation was performed. For example, in
+ // Cloud Storage or S3 corresponds to the bucket name, and in Cosmos DB to the
+ // database name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myBucketName", "myDbName"
+ FaaSDocumentCollectionKey = attribute.Key("faas.document.collection")
+
+ // FaaSDocumentNameKey is the attribute Key conforming to the
+ // "faas.document.name" semantic conventions. It represents the document
+ // name/table subjected to the operation. For example, in Cloud Storage or S3 is
+ // the name of the file, and in Cosmos DB the table name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "myFile.txt", "myTableName"
+ FaaSDocumentNameKey = attribute.Key("faas.document.name")
+
+ // FaaSDocumentOperationKey is the attribute Key conforming to the
+ // "faas.document.operation" semantic conventions. It represents the describes
+ // the type of the operation that was performed on the data.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSDocumentOperationKey = attribute.Key("faas.document.operation")
+
+ // FaaSDocumentTimeKey is the attribute Key conforming to the
+ // "faas.document.time" semantic conventions. It represents a string containing
+ // the time when the data was accessed in the [ISO 8601] format expressed in
+ // [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSDocumentTimeKey = attribute.Key("faas.document.time")
+
+ // FaaSInstanceKey is the attribute Key conforming to the "faas.instance"
+ // semantic conventions. It represents the execution environment ID as a string,
+ // that will be potentially reused for other invocations to the same
+ // function/function version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de"
+ // Note: - **AWS Lambda:** Use the (full) log stream name.
+ FaaSInstanceKey = attribute.Key("faas.instance")
+
+ // FaaSInvocationIDKey is the attribute Key conforming to the
+ // "faas.invocation_id" semantic conventions. It represents the invocation ID of
+ // the current function invocation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: af9d5aa4-a685-4c5f-a22b-444f80b3cc28
+ FaaSInvocationIDKey = attribute.Key("faas.invocation_id")
+
+ // FaaSInvokedNameKey is the attribute Key conforming to the "faas.invoked_name"
+ // semantic conventions. It represents the name of the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: my-function
+ // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked
+ // function.
+ FaaSInvokedNameKey = attribute.Key("faas.invoked_name")
+
+ // FaaSInvokedProviderKey is the attribute Key conforming to the
+ // "faas.invoked_provider" semantic conventions. It represents the cloud
+ // provider of the invoked function.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: SHOULD be equal to the `cloud.provider` resource attribute of the
+ // invoked function.
+ FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider")
+
+ // FaaSInvokedRegionKey is the attribute Key conforming to the
+ // "faas.invoked_region" semantic conventions. It represents the cloud region of
+ // the invoked function.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: eu-central-1
+ // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked
+ // function.
+ FaaSInvokedRegionKey = attribute.Key("faas.invoked_region")
+
+ // FaaSMaxMemoryKey is the attribute Key conforming to the "faas.max_memory"
+ // semantic conventions. It represents the amount of memory available to the
+ // serverless function converted to Bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Note: It's recommended to set this attribute since e.g. too little memory can
+ // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda,
+ // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this
+ // information (which must be multiplied by 1,048,576).
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory")
+
+ // FaaSNameKey is the attribute Key conforming to the "faas.name" semantic
+ // conventions. It represents the name of the single function that this runtime
+ // instance executes.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-function", "myazurefunctionapp/some-function-name"
+ // Note: This is the name of the function as configured/deployed on the FaaS
+ // platform and is usually different from the name of the callback
+ // function (which may be stored in the
+ // [`code.namespace`/`code.function.name`]
+ // span attributes).
+ //
+ // For some cloud providers, the above definition is ambiguous. The following
+ // definition of function name MUST be used for this attribute
+ // (and consequently the span name) for the listed cloud providers/products:
+ //
+	// - **Azure:** The full name `<FUNCAPP>/<FUNC>`, i.e., function app name
+ // followed by a forward slash followed by the function name (this form
+ // can also be seen in the resource JSON for the function).
+ // This means that a span attribute MUST be used, as an Azure function
+ // app can host multiple functions that would usually share
+ // a TracerProvider (see also the `cloud.resource_id` attribute).
+ //
+ //
+ // [`code.namespace`/`code.function.name`]: /docs/general/attributes.md#source-code-attributes
+ FaaSNameKey = attribute.Key("faas.name")
+
+ // FaaSTimeKey is the attribute Key conforming to the "faas.time" semantic
+ // conventions. It represents a string containing the function invocation time
+ // in the [ISO 8601] format expressed in [UTC].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 2020-01-23T13:47:06Z
+ //
+ // [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+ // [UTC]: https://www.w3.org/TR/NOTE-datetime
+ FaaSTimeKey = attribute.Key("faas.time")
+
+ // FaaSTriggerKey is the attribute Key conforming to the "faas.trigger" semantic
+ // conventions. It represents the type of the trigger which caused this function
+ // invocation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FaaSTriggerKey = attribute.Key("faas.trigger")
+
+ // FaaSVersionKey is the attribute Key conforming to the "faas.version" semantic
+ // conventions. It represents the immutable version of the function being
+ // executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "26", "pinkfroid-00002"
+ // Note: Depending on the cloud provider and platform, use:
+ //
+ // - **AWS Lambda:** The [function version]
+ // (an integer represented as a decimal string).
+ // - **Google Cloud Run (Services):** The [revision]
+ // (i.e., the function name plus the revision suffix).
+ // - **Google Cloud Functions:** The value of the
+ // [`K_REVISION` environment variable].
+ // - **Azure Functions:** Not applicable. Do not set this attribute.
+ //
+ //
+ // [function version]: https://docs.aws.amazon.com/lambda/latest/dg/configuration-versions.html
+ // [revision]: https://cloud.google.com/run/docs/managing/revisions
+ // [`K_REVISION` environment variable]: https://cloud.google.com/functions/docs/env-var#runtime_environment_variables_set_automatically
+ FaaSVersionKey = attribute.Key("faas.version")
+)
+
+// FaaSColdstart returns an attribute KeyValue conforming to the "faas.coldstart"
+// semantic conventions. It represents a boolean that is true if the serverless
+// function is executed for the first time (aka cold-start).
+func FaaSColdstart(val bool) attribute.KeyValue {
+ return FaaSColdstartKey.Bool(val)
+}
+
+// FaaSCron returns an attribute KeyValue conforming to the "faas.cron" semantic
+// conventions. It represents a string containing the schedule period as
+// [Cron Expression].
+//
+// [Cron Expression]: https://docs.oracle.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm
+func FaaSCron(val string) attribute.KeyValue {
+ return FaaSCronKey.String(val)
+}
+
+// FaaSDocumentCollection returns an attribute KeyValue conforming to the
+// "faas.document.collection" semantic conventions. It represents the name of the
+// source on which the triggering operation was performed. For example, in Cloud
+// Storage or S3 corresponds to the bucket name, and in Cosmos DB to the database
+// name.
+func FaaSDocumentCollection(val string) attribute.KeyValue {
+ return FaaSDocumentCollectionKey.String(val)
+}
+
+// FaaSDocumentName returns an attribute KeyValue conforming to the
+// "faas.document.name" semantic conventions. It represents the document
+// name/table subjected to the operation. For example, in Cloud Storage or S3 is
+// the name of the file, and in Cosmos DB the table name.
+func FaaSDocumentName(val string) attribute.KeyValue {
+ return FaaSDocumentNameKey.String(val)
+}
+
+// FaaSDocumentTime returns an attribute KeyValue conforming to the
+// "faas.document.time" semantic conventions. It represents a string containing
+// the time when the data was accessed in the [ISO 8601] format expressed in
+// [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSDocumentTime(val string) attribute.KeyValue {
+ return FaaSDocumentTimeKey.String(val)
+}
+
+// FaaSInstance returns an attribute KeyValue conforming to the "faas.instance"
+// semantic conventions. It represents the execution environment ID as a string,
+// that will be potentially reused for other invocations to the same
+// function/function version.
+func FaaSInstance(val string) attribute.KeyValue {
+ return FaaSInstanceKey.String(val)
+}
+
+// FaaSInvocationID returns an attribute KeyValue conforming to the
+// "faas.invocation_id" semantic conventions. It represents the invocation ID of
+// the current function invocation.
+func FaaSInvocationID(val string) attribute.KeyValue {
+ return FaaSInvocationIDKey.String(val)
+}
+
+// FaaSInvokedName returns an attribute KeyValue conforming to the
+// "faas.invoked_name" semantic conventions. It represents the name of the
+// invoked function.
+func FaaSInvokedName(val string) attribute.KeyValue {
+ return FaaSInvokedNameKey.String(val)
+}
+
+// FaaSInvokedRegion returns an attribute KeyValue conforming to the
+// "faas.invoked_region" semantic conventions. It represents the cloud region of
+// the invoked function.
+func FaaSInvokedRegion(val string) attribute.KeyValue {
+ return FaaSInvokedRegionKey.String(val)
+}
+
+// FaaSMaxMemory returns an attribute KeyValue conforming to the
+// "faas.max_memory" semantic conventions. It represents the amount of memory
+// available to the serverless function converted to Bytes.
+func FaaSMaxMemory(val int) attribute.KeyValue {
+ return FaaSMaxMemoryKey.Int(val)
+}
+
+// FaaSName returns an attribute KeyValue conforming to the "faas.name" semantic
+// conventions. It represents the name of the single function that this runtime
+// instance executes.
+func FaaSName(val string) attribute.KeyValue {
+ return FaaSNameKey.String(val)
+}
+
+// FaaSTime returns an attribute KeyValue conforming to the "faas.time" semantic
+// conventions. It represents a string containing the function invocation time in
+// the [ISO 8601] format expressed in [UTC].
+//
+// [ISO 8601]: https://www.iso.org/iso-8601-date-and-time-format.html
+// [UTC]: https://www.w3.org/TR/NOTE-datetime
+func FaaSTime(val string) attribute.KeyValue {
+ return FaaSTimeKey.String(val)
+}
+
+// FaaSVersion returns an attribute KeyValue conforming to the "faas.version"
+// semantic conventions. It represents the immutable version of the function
+// being executed.
+func FaaSVersion(val string) attribute.KeyValue {
+ return FaaSVersionKey.String(val)
+}
+
+// Enum values for faas.document.operation
+var (
+ // When a new object is created.
+ // Stability: development
+ FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert")
+ // When an object is modified.
+ // Stability: development
+ FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit")
+ // When an object is deleted.
+ // Stability: development
+ FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete")
+)
+
+// Enum values for faas.invoked_provider
+var (
+ // Alibaba Cloud
+ // Stability: development
+ FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud")
+ // Amazon Web Services
+ // Stability: development
+ FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws")
+ // Microsoft Azure
+ // Stability: development
+ FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure")
+ // Google Cloud Platform
+ // Stability: development
+ FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp")
+ // Tencent Cloud
+ // Stability: development
+ FaaSInvokedProviderTencentCloud = FaaSInvokedProviderKey.String("tencent_cloud")
+)
+
+// Enum values for faas.trigger
+var (
+ // A response to some data source operation such as a database or filesystem
+ // read/write
+ // Stability: development
+ FaaSTriggerDatasource = FaaSTriggerKey.String("datasource")
+ // To provide an answer to an inbound HTTP request
+ // Stability: development
+ FaaSTriggerHTTP = FaaSTriggerKey.String("http")
+ // A function is set to be executed when messages are sent to a messaging system
+ // Stability: development
+ FaaSTriggerPubSub = FaaSTriggerKey.String("pubsub")
+ // A function is scheduled to be executed regularly
+ // Stability: development
+ FaaSTriggerTimer = FaaSTriggerKey.String("timer")
+ // If none of the others apply
+ // Stability: development
+ FaaSTriggerOther = FaaSTriggerKey.String("other")
+)
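+
+// Usage sketch (illustrative, not part of the generated conventions):
+// annotating a serverless invocation span with its trigger, cold-start flag,
+// and invocation ID; `span` is assumed to be a
+// go.opentelemetry.io/otel/trace.Span obtained by the caller.
+//
+//	span.SetAttributes(
+//		FaaSTriggerHTTP,
+//		FaaSColdstart(true),
+//		FaaSInvocationID("af9d5aa4-a685-4c5f-a22b-444f80b3cc28"),
+//	)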
+
+// Namespace: feature_flag
+const (
+ // FeatureFlagContextIDKey is the attribute Key conforming to the
+ // "feature_flag.context.id" semantic conventions. It represents the unique
+ // identifier for the flag evaluation context. For example, the targeting key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "5157782b-2203-4c80-a857-dbbd5e7761db"
+ FeatureFlagContextIDKey = attribute.Key("feature_flag.context.id")
+
+ // FeatureFlagKeyKey is the attribute Key conforming to the "feature_flag.key"
+ // semantic conventions. It represents the lookup key of the feature flag.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "logo-color"
+ FeatureFlagKeyKey = attribute.Key("feature_flag.key")
+
+ // FeatureFlagProviderNameKey is the attribute Key conforming to the
+ // "feature_flag.provider.name" semantic conventions. It represents the
+ // identifies the feature flag provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "Flag Manager"
+ FeatureFlagProviderNameKey = attribute.Key("feature_flag.provider.name")
+
+ // FeatureFlagResultReasonKey is the attribute Key conforming to the
+ // "feature_flag.result.reason" semantic conventions. It represents the reason
+ // code which shows how a feature flag value was determined.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "static", "targeting_match", "error", "default"
+ FeatureFlagResultReasonKey = attribute.Key("feature_flag.result.reason")
+
+ // FeatureFlagResultValueKey is the attribute Key conforming to the
+ // "feature_flag.result.value" semantic conventions. It represents the evaluated
+ // value of the feature flag.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "#ff0000", true, 3
+ // Note: With some feature flag providers, feature flag results can be quite
+ // large or contain private or sensitive details.
+ // Because of this, `feature_flag.result.variant` is often the preferred
+ // attribute if it is available.
+ //
+ // It may be desirable to redact or otherwise limit the size and scope of
+ // `feature_flag.result.value` if possible.
+ // Because the evaluated flag value is unstructured and may be any type, it is
+ // left to the instrumentation author to determine how best to achieve this.
+ FeatureFlagResultValueKey = attribute.Key("feature_flag.result.value")
+
+ // FeatureFlagResultVariantKey is the attribute Key conforming to the
+ // "feature_flag.result.variant" semantic conventions. It represents a semantic
+ // identifier for an evaluated flag value.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "red", "true", "on"
+ // Note: A semantic identifier, commonly referred to as a variant, provides a
+ // means
+ // for referring to a value without including the value itself. This can
+ // provide additional context for understanding the meaning behind a value.
+	// For example, the variant `red` may be used for the value `#c05543`.
+ FeatureFlagResultVariantKey = attribute.Key("feature_flag.result.variant")
+
+ // FeatureFlagSetIDKey is the attribute Key conforming to the
+ // "feature_flag.set.id" semantic conventions. It represents the identifier of
+ // the [flag set] to which the feature flag belongs.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "proj-1", "ab98sgs", "service1/dev"
+ //
+ // [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+ FeatureFlagSetIDKey = attribute.Key("feature_flag.set.id")
+
+ // FeatureFlagVersionKey is the attribute Key conforming to the
+ // "feature_flag.version" semantic conventions. It represents the version of the
+ // ruleset used during the evaluation. This may be any stable value which
+ // uniquely identifies the ruleset.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Release_Candidate
+ //
+ // Examples: "1", "01ABCDEF"
+ FeatureFlagVersionKey = attribute.Key("feature_flag.version")
+)
+
+// FeatureFlagContextID returns an attribute KeyValue conforming to the
+// "feature_flag.context.id" semantic conventions. It represents the unique
+// identifier for the flag evaluation context. For example, the targeting key.
+func FeatureFlagContextID(val string) attribute.KeyValue {
+ return FeatureFlagContextIDKey.String(val)
+}
+
+// FeatureFlagKey returns an attribute KeyValue conforming to the
+// "feature_flag.key" semantic conventions. It represents the lookup key of the
+// feature flag.
+func FeatureFlagKey(val string) attribute.KeyValue {
+ return FeatureFlagKeyKey.String(val)
+}
+
+// FeatureFlagProviderName returns an attribute KeyValue conforming to the
+// "feature_flag.provider.name" semantic conventions. It represents the
+// identifies the feature flag provider.
+func FeatureFlagProviderName(val string) attribute.KeyValue {
+ return FeatureFlagProviderNameKey.String(val)
+}
+
+// FeatureFlagResultVariant returns an attribute KeyValue conforming to the
+// "feature_flag.result.variant" semantic conventions. It represents a semantic
+// identifier for an evaluated flag value.
+func FeatureFlagResultVariant(val string) attribute.KeyValue {
+ return FeatureFlagResultVariantKey.String(val)
+}
+
+// FeatureFlagSetID returns an attribute KeyValue conforming to the
+// "feature_flag.set.id" semantic conventions. It represents the identifier of
+// the [flag set] to which the feature flag belongs.
+//
+// [flag set]: https://openfeature.dev/specification/glossary/#flag-set
+func FeatureFlagSetID(val string) attribute.KeyValue {
+ return FeatureFlagSetIDKey.String(val)
+}
+
+// FeatureFlagVersion returns an attribute KeyValue conforming to the
+// "feature_flag.version" semantic conventions. It represents the version of the
+// ruleset used during the evaluation. This may be any stable value which
+// uniquely identifies the ruleset.
+func FeatureFlagVersion(val string) attribute.KeyValue {
+ return FeatureFlagVersionKey.String(val)
+}
+
+// Enum values for feature_flag.result.reason
+var (
+ // The resolved value is static (no dynamic evaluation).
+ // Stability: release_candidate
+ FeatureFlagResultReasonStatic = FeatureFlagResultReasonKey.String("static")
+ // The resolved value fell back to a pre-configured value (no dynamic evaluation
+ // occurred or dynamic evaluation yielded no result).
+ // Stability: release_candidate
+ FeatureFlagResultReasonDefault = FeatureFlagResultReasonKey.String("default")
+ // The resolved value was the result of a dynamic evaluation, such as a rule or
+ // specific user-targeting.
+ // Stability: release_candidate
+ FeatureFlagResultReasonTargetingMatch = FeatureFlagResultReasonKey.String("targeting_match")
+ // The resolved value was the result of pseudorandom assignment.
+ // Stability: release_candidate
+ FeatureFlagResultReasonSplit = FeatureFlagResultReasonKey.String("split")
+ // The resolved value was retrieved from cache.
+ // Stability: release_candidate
+ FeatureFlagResultReasonCached = FeatureFlagResultReasonKey.String("cached")
+ // The resolved value was the result of the flag being disabled in the
+ // management system.
+ // Stability: release_candidate
+ FeatureFlagResultReasonDisabled = FeatureFlagResultReasonKey.String("disabled")
+ // The reason for the resolved value could not be determined.
+ // Stability: release_candidate
+ FeatureFlagResultReasonUnknown = FeatureFlagResultReasonKey.String("unknown")
+ // The resolved value is non-authoritative or possibly out of date
+ // Stability: release_candidate
+ FeatureFlagResultReasonStale = FeatureFlagResultReasonKey.String("stale")
+ // The resolved value was the result of an error.
+ // Stability: release_candidate
+ FeatureFlagResultReasonError = FeatureFlagResultReasonKey.String("error")
+)
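+
+// Usage sketch (illustrative, not part of the generated conventions):
+// describing a flag evaluation by key, variant, and resolution reason; `span`
+// is assumed to be a go.opentelemetry.io/otel/trace.Span obtained by the
+// caller.
+//
+//	span.SetAttributes(
+//		FeatureFlagKey("logo-color"),
+//		FeatureFlagResultVariant("red"),
+//		FeatureFlagResultReasonTargetingMatch,
+//	)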
+
+// Namespace: file
+const (
+ // FileAccessedKey is the attribute Key conforming to the "file.accessed"
+ // semantic conventions. It represents the time when the file was last accessed,
+ // in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileAccessedKey = attribute.Key("file.accessed")
+
+ // FileAttributesKey is the attribute Key conforming to the "file.attributes"
+ // semantic conventions. It represents the array of file attributes.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "readonly", "hidden"
+	// Note: Attribute names depend on the OS or file system. Here’s a
+ // non-exhaustive list of values expected for this attribute: `archive`,
+ // `compressed`, `directory`, `encrypted`, `execute`, `hidden`, `immutable`,
+ // `journaled`, `read`, `readonly`, `symbolic link`, `system`, `temporary`,
+ // `write`.
+ FileAttributesKey = attribute.Key("file.attributes")
+
+ // FileChangedKey is the attribute Key conforming to the "file.changed" semantic
+ // conventions. It represents the time when the file attributes or metadata was
+ // last changed, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: `file.changed` captures the time when any of the file's properties or
+ // attributes (including the content) are changed, while `file.modified`
+ // captures the timestamp when the file content is modified.
+ FileChangedKey = attribute.Key("file.changed")
+
+ // FileCreatedKey is the attribute Key conforming to the "file.created" semantic
+ // conventions. It represents the time when the file was created, in ISO 8601
+ // format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ // Note: This attribute might not be supported by some file systems — NFS,
+ // FAT32, in embedded OS, etc.
+ FileCreatedKey = attribute.Key("file.created")
+
+ // FileDirectoryKey is the attribute Key conforming to the "file.directory"
+ // semantic conventions. It represents the directory where the file is located.
+ // It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/user", "C:\Program Files\MyApp"
+ FileDirectoryKey = attribute.Key("file.directory")
+
+ // FileExtensionKey is the attribute Key conforming to the "file.extension"
+ // semantic conventions. It represents the file extension, excluding the leading
+ // dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: When the file name has multiple extensions (example.tar.gz), only the
+ // last one should be captured ("gz", not "tar.gz").
+ FileExtensionKey = attribute.Key("file.extension")
+
+ // FileForkNameKey is the attribute Key conforming to the "file.fork_name"
+ // semantic conventions. It represents the name of the fork. A fork is
+ // additional data associated with a filesystem object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Zone.Identifier"
+ // Note: On Linux, a resource fork is used to store additional data with a
+ // filesystem object. A file always has at least one fork for the data portion,
+ // and additional forks may exist.
+ // On NTFS, this is analogous to an Alternate Data Stream (ADS), and the default
+ // data stream for a file is just called $DATA. Zone.Identifier is commonly used
+ // by Windows to track contents downloaded from the Internet. An ADS is
+ // typically of the form: C:\path\to\filename.extension:some_fork_name, and
+ // some_fork_name is the value that should populate `fork_name`.
+ // `filename.extension` should populate `file.name`, and `extension` should
+ // populate `file.extension`. The full path, `file.path`, will include the fork
+ // name.
+ FileForkNameKey = attribute.Key("file.fork_name")
+
+ // FileGroupIDKey is the attribute Key conforming to the "file.group.id"
+ // semantic conventions. It represents the primary Group ID (GID) of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileGroupIDKey = attribute.Key("file.group.id")
+
+ // FileGroupNameKey is the attribute Key conforming to the "file.group.name"
+ // semantic conventions. It represents the primary group name of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "users"
+ FileGroupNameKey = attribute.Key("file.group.name")
+
+ // FileInodeKey is the attribute Key conforming to the "file.inode" semantic
+ // conventions. It represents the inode representing the file in the filesystem.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "256383"
+ FileInodeKey = attribute.Key("file.inode")
+
+ // FileModeKey is the attribute Key conforming to the "file.mode" semantic
+ // conventions. It represents the mode of the file in octal representation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0640"
+ FileModeKey = attribute.Key("file.mode")
+
+ // FileModifiedKey is the attribute Key conforming to the "file.modified"
+ // semantic conventions. It represents the time when the file content was last
+ // modified, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T12:00:00Z"
+ FileModifiedKey = attribute.Key("file.modified")
+
+ // FileNameKey is the attribute Key conforming to the "file.name" semantic
+ // conventions. It represents the name of the file including the extension,
+ // without the directory.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.png"
+ FileNameKey = attribute.Key("file.name")
+
+ // FileOwnerIDKey is the attribute Key conforming to the "file.owner.id"
+ // semantic conventions. It represents the user ID (UID) or security identifier
+ // (SID) of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1000"
+ FileOwnerIDKey = attribute.Key("file.owner.id")
+
+ // FileOwnerNameKey is the attribute Key conforming to the "file.owner.name"
+ // semantic conventions. It represents the username of the file owner.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ FileOwnerNameKey = attribute.Key("file.owner.name")
+
+ // FilePathKey is the attribute Key conforming to the "file.path" semantic
+ // conventions. It represents the full path to the file, including the file
+ // name. It should include the drive letter, when appropriate.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/home/alice/example.png", "C:\Program Files\MyApp\myapp.exe"
+ FilePathKey = attribute.Key("file.path")
+
+ // FileSizeKey is the attribute Key conforming to the "file.size" semantic
+ // conventions. It represents the file size in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ FileSizeKey = attribute.Key("file.size")
+
+ // FileSymbolicLinkTargetPathKey is the attribute Key conforming to the
+ // "file.symbolic_link.target_path" semantic conventions. It represents the path
+ // to the target of a symbolic link.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/python3"
+ // Note: This attribute is only applicable to symbolic links.
+ FileSymbolicLinkTargetPathKey = attribute.Key("file.symbolic_link.target_path")
+)
+
+// FileAccessed returns an attribute KeyValue conforming to the "file.accessed"
+// semantic conventions. It represents the time when the file was last accessed,
+// in ISO 8601 format.
+func FileAccessed(val string) attribute.KeyValue {
+ return FileAccessedKey.String(val)
+}
+
+// FileAttributes returns an attribute KeyValue conforming to the
+// "file.attributes" semantic conventions. It represents the array of file
+// attributes.
+func FileAttributes(val ...string) attribute.KeyValue {
+ return FileAttributesKey.StringSlice(val)
+}
+
+// FileChanged returns an attribute KeyValue conforming to the "file.changed"
+// semantic conventions. It represents the time when the file attributes or
+// metadata was last changed, in ISO 8601 format.
+func FileChanged(val string) attribute.KeyValue {
+ return FileChangedKey.String(val)
+}
+
+// FileCreated returns an attribute KeyValue conforming to the "file.created"
+// semantic conventions. It represents the time when the file was created, in ISO
+// 8601 format.
+func FileCreated(val string) attribute.KeyValue {
+ return FileCreatedKey.String(val)
+}
+
+// FileDirectory returns an attribute KeyValue conforming to the "file.directory"
+// semantic conventions. It represents the directory where the file is located.
+// It should include the drive letter, when appropriate.
+func FileDirectory(val string) attribute.KeyValue {
+ return FileDirectoryKey.String(val)
+}
+
+// FileExtension returns an attribute KeyValue conforming to the "file.extension"
+// semantic conventions. It represents the file extension, excluding the leading
+// dot.
+func FileExtension(val string) attribute.KeyValue {
+ return FileExtensionKey.String(val)
+}
+
+// FileForkName returns an attribute KeyValue conforming to the "file.fork_name"
+// semantic conventions. It represents the name of the fork. A fork is additional
+// data associated with a filesystem object.
+func FileForkName(val string) attribute.KeyValue {
+ return FileForkNameKey.String(val)
+}
+
+// FileGroupID returns an attribute KeyValue conforming to the "file.group.id"
+// semantic conventions. It represents the primary Group ID (GID) of the file.
+func FileGroupID(val string) attribute.KeyValue {
+ return FileGroupIDKey.String(val)
+}
+
+// FileGroupName returns an attribute KeyValue conforming to the
+// "file.group.name" semantic conventions. It represents the primary group name
+// of the file.
+func FileGroupName(val string) attribute.KeyValue {
+ return FileGroupNameKey.String(val)
+}
+
+// FileInode returns an attribute KeyValue conforming to the "file.inode"
+// semantic conventions. It represents the inode representing the file in the
+// filesystem.
+func FileInode(val string) attribute.KeyValue {
+ return FileInodeKey.String(val)
+}
+
+// FileMode returns an attribute KeyValue conforming to the "file.mode" semantic
+// conventions. It represents the mode of the file in octal representation.
+func FileMode(val string) attribute.KeyValue {
+ return FileModeKey.String(val)
+}
+
+// FileModified returns an attribute KeyValue conforming to the "file.modified"
+// semantic conventions. It represents the time when the file content was last
+// modified, in ISO 8601 format.
+func FileModified(val string) attribute.KeyValue {
+ return FileModifiedKey.String(val)
+}
+
+// FileName returns an attribute KeyValue conforming to the "file.name" semantic
+// conventions. It represents the name of the file including the extension,
+// without the directory.
+func FileName(val string) attribute.KeyValue {
+ return FileNameKey.String(val)
+}
+
+// FileOwnerID returns an attribute KeyValue conforming to the "file.owner.id"
+// semantic conventions. It represents the user ID (UID) or security identifier
+// (SID) of the file owner.
+func FileOwnerID(val string) attribute.KeyValue {
+ return FileOwnerIDKey.String(val)
+}
+
+// FileOwnerName returns an attribute KeyValue conforming to the
+// "file.owner.name" semantic conventions. It represents the username of the file
+// owner.
+func FileOwnerName(val string) attribute.KeyValue {
+ return FileOwnerNameKey.String(val)
+}
+
+// FilePath returns an attribute KeyValue conforming to the "file.path" semantic
+// conventions. It represents the full path to the file, including the file name.
+// It should include the drive letter, when appropriate.
+func FilePath(val string) attribute.KeyValue {
+ return FilePathKey.String(val)
+}
+
+// FileSize returns an attribute KeyValue conforming to the "file.size" semantic
+// conventions. It represents the file size in bytes.
+func FileSize(val int) attribute.KeyValue {
+ return FileSizeKey.Int(val)
+}
+
+// FileSymbolicLinkTargetPath returns an attribute KeyValue conforming to the
+// "file.symbolic_link.target_path" semantic conventions. It represents the path
+// to the target of a symbolic link.
+func FileSymbolicLinkTargetPath(val string) attribute.KeyValue {
+ return FileSymbolicLinkTargetPathKey.String(val)
+}
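+
+// Usage sketch (illustrative, not part of the generated conventions):
+// describing a file referenced by an event; `span` is assumed to be a
+// go.opentelemetry.io/otel/trace.Span obtained by the caller, and the values
+// are placeholders.
+//
+//	span.SetAttributes(
+//		FilePath("/home/alice/example.png"),
+//		FileName("example.png"),
+//		FileExtension("png"),
+//		FileSize(77835),
+//	)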
+
+// Namespace: gcp
+const (
+ // GCPAppHubApplicationContainerKey is the attribute Key conforming to the
+ // "gcp.apphub.application.container" semantic conventions. It represents the
+ // container within GCP where the AppHub application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "projects/my-container-project"
+ GCPAppHubApplicationContainerKey = attribute.Key("gcp.apphub.application.container")
+
+ // GCPAppHubApplicationIDKey is the attribute Key conforming to the
+ // "gcp.apphub.application.id" semantic conventions. It represents the name of
+ // the application as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-application"
+ GCPAppHubApplicationIDKey = attribute.Key("gcp.apphub.application.id")
+
+ // GCPAppHubApplicationLocationKey is the attribute Key conforming to the
+ // "gcp.apphub.application.location" semantic conventions. It represents the GCP
+ // zone or region where the application is defined.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "us-central1"
+ GCPAppHubApplicationLocationKey = attribute.Key("gcp.apphub.application.location")
+
+ // GCPAppHubServiceCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.criticality_type" semantic conventions. It represents the
+	// criticality of a service, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubServiceCriticalityTypeKey = attribute.Key("gcp.apphub.service.criticality_type")
+
+ // GCPAppHubServiceEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.service.environment_type" semantic conventions. It represents the
+	// environment of a service, which is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubServiceEnvironmentTypeKey = attribute.Key("gcp.apphub.service.environment_type")
+
+ // GCPAppHubServiceIDKey is the attribute Key conforming to the
+ // "gcp.apphub.service.id" semantic conventions. It represents the name of the
+ // service as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-service"
+ GCPAppHubServiceIDKey = attribute.Key("gcp.apphub.service.id")
+
+ // GCPAppHubWorkloadCriticalityTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.criticality_type" semantic conventions. It represents
+	// the criticality of a workload, which indicates its importance to the business.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub type enum]
+ //
+ // [See AppHub type enum]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type
+ GCPAppHubWorkloadCriticalityTypeKey = attribute.Key("gcp.apphub.workload.criticality_type")
+
+ // GCPAppHubWorkloadEnvironmentTypeKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.environment_type" semantic conventions. It represents
+	// the environment of a workload, which is the stage of a software lifecycle.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: [See AppHub environment type]
+ //
+ // [See AppHub environment type]: https://cloud.google.com/app-hub/docs/reference/rest/v1/Attributes#type_1
+ GCPAppHubWorkloadEnvironmentTypeKey = attribute.Key("gcp.apphub.workload.environment_type")
+
+ // GCPAppHubWorkloadIDKey is the attribute Key conforming to the
+ // "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+ // workload as configured in AppHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-workload"
+ GCPAppHubWorkloadIDKey = attribute.Key("gcp.apphub.workload.id")
+
+ // GCPClientServiceKey is the attribute Key conforming to the
+ // "gcp.client.service" semantic conventions. It represents the identifies the
+ // Google Cloud service for which the official client library is intended.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "appengine", "run", "firestore", "alloydb", "spanner"
+ // Note: Intended to be a stable identifier for Google Cloud client libraries
+ // that is uniform across implementation languages. The value should be derived
+ // from the canonical service domain for the service; for example,
+ // 'foo.googleapis.com' should result in a value of 'foo'.
+ GCPClientServiceKey = attribute.Key("gcp.client.service")
+
+ // GCPCloudRunJobExecutionKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+ // the Cloud Run [execution] being run for the Job, as set by the
+ // [`CLOUD_RUN_EXECUTION`] environment variable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "job-name-xxxx", "sample-job-mdw84"
+ //
+ // [execution]: https://cloud.google.com/run/docs/managing/job-executions
+ // [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobExecutionKey = attribute.Key("gcp.cloud_run.job.execution")
+
+ // GCPCloudRunJobTaskIndexKey is the attribute Key conforming to the
+ // "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+ // for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+ // environment variable.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1
+ //
+ // [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+ GCPCloudRunJobTaskIndexKey = attribute.Key("gcp.cloud_run.job.task_index")
+
+ // GCPGCEInstanceHostnameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+	// of a GCE instance. This is the full value of the default or
+	// [custom hostname].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-host1234.example.com",
+ // "sample-vm.us-west1-b.c.my-project.internal"
+ //
+ // [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+ GCPGCEInstanceHostnameKey = attribute.Key("gcp.gce.instance.hostname")
+
+ // GCPGCEInstanceNameKey is the attribute Key conforming to the
+ // "gcp.gce.instance.name" semantic conventions. It represents the instance name
+ // of a GCE instance. This is the value provided by `host.name`, the visible
+ // name of the instance in the Cloud Console UI, and the prefix for the default
+ // hostname of the instance as defined by the [default internal DNS name].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "instance-1", "my-vm-name"
+ //
+ // [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+ GCPGCEInstanceNameKey = attribute.Key("gcp.gce.instance.name")
+)
+
+// GCPAppHubApplicationContainer returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.container" semantic conventions. It represents the
+// container within GCP where the AppHub application is defined.
+func GCPAppHubApplicationContainer(val string) attribute.KeyValue {
+ return GCPAppHubApplicationContainerKey.String(val)
+}
+
+// GCPAppHubApplicationID returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.id" semantic conventions. It represents the name of
+// the application as configured in AppHub.
+func GCPAppHubApplicationID(val string) attribute.KeyValue {
+ return GCPAppHubApplicationIDKey.String(val)
+}
+
+// GCPAppHubApplicationLocation returns an attribute KeyValue conforming to the
+// "gcp.apphub.application.location" semantic conventions. It represents the GCP
+// zone or region where the application is defined.
+func GCPAppHubApplicationLocation(val string) attribute.KeyValue {
+ return GCPAppHubApplicationLocationKey.String(val)
+}
+
+// GCPAppHubServiceID returns an attribute KeyValue conforming to the
+// "gcp.apphub.service.id" semantic conventions. It represents the name of the
+// service as configured in AppHub.
+func GCPAppHubServiceID(val string) attribute.KeyValue {
+ return GCPAppHubServiceIDKey.String(val)
+}
+
+// GCPAppHubWorkloadID returns an attribute KeyValue conforming to the
+// "gcp.apphub.workload.id" semantic conventions. It represents the name of the
+// workload as configured in AppHub.
+func GCPAppHubWorkloadID(val string) attribute.KeyValue {
+ return GCPAppHubWorkloadIDKey.String(val)
+}
+
+// GCPClientService returns an attribute KeyValue conforming to the
+// "gcp.client.service" semantic conventions. It represents the identifies the
+// Google Cloud service for which the official client library is intended.
+func GCPClientService(val string) attribute.KeyValue {
+ return GCPClientServiceKey.String(val)
+}
+
+// GCPCloudRunJobExecution returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.execution" semantic conventions. It represents the name of
+// the Cloud Run [execution] being run for the Job, as set by the
+// [`CLOUD_RUN_EXECUTION`] environment variable.
+//
+// [execution]: https://cloud.google.com/run/docs/managing/job-executions
+// [`CLOUD_RUN_EXECUTION`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobExecution(val string) attribute.KeyValue {
+ return GCPCloudRunJobExecutionKey.String(val)
+}
+
+// GCPCloudRunJobTaskIndex returns an attribute KeyValue conforming to the
+// "gcp.cloud_run.job.task_index" semantic conventions. It represents the index
+// for a task within an execution as provided by the [`CLOUD_RUN_TASK_INDEX`]
+// environment variable.
+//
+// [`CLOUD_RUN_TASK_INDEX`]: https://cloud.google.com/run/docs/container-contract#jobs-env-vars
+func GCPCloudRunJobTaskIndex(val int) attribute.KeyValue {
+ return GCPCloudRunJobTaskIndexKey.Int(val)
+}
+
+// GCPGCEInstanceHostname returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.hostname" semantic conventions. It represents the hostname
+// of a GCE instance. This is the full value of the default or
+// [custom hostname].
+//
+// [custom hostname]: https://cloud.google.com/compute/docs/instances/custom-hostname-vm
+func GCPGCEInstanceHostname(val string) attribute.KeyValue {
+ return GCPGCEInstanceHostnameKey.String(val)
+}
+
+// GCPGCEInstanceName returns an attribute KeyValue conforming to the
+// "gcp.gce.instance.name" semantic conventions. It represents the instance name
+// of a GCE instance. This is the value provided by `host.name`, the visible name
+// of the instance in the Cloud Console UI, and the prefix for the default
+// hostname of the instance as defined by the [default internal DNS name].
+//
+// [default internal DNS name]: https://cloud.google.com/compute/docs/internal-dns#instance-fully-qualified-domain-names
+func GCPGCEInstanceName(val string) attribute.KeyValue {
+ return GCPGCEInstanceNameKey.String(val)
+}
+
+// Enum values for gcp.apphub.service.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMissionCritical = GCPAppHubServiceCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeHigh = GCPAppHubServiceCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeMedium = GCPAppHubServiceCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubServiceCriticalityTypeLow = GCPAppHubServiceCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.service.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeProduction = GCPAppHubServiceEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeStaging = GCPAppHubServiceEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeTest = GCPAppHubServiceEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubServiceEnvironmentTypeDevelopment = GCPAppHubServiceEnvironmentTypeKey.String("DEVELOPMENT")
+)
+
+// Enum values for gcp.apphub.workload.criticality_type
+var (
+ // Mission critical service.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMissionCritical = GCPAppHubWorkloadCriticalityTypeKey.String("MISSION_CRITICAL")
+ // High impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeHigh = GCPAppHubWorkloadCriticalityTypeKey.String("HIGH")
+ // Medium impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeMedium = GCPAppHubWorkloadCriticalityTypeKey.String("MEDIUM")
+ // Low impact.
+ // Stability: development
+ GCPAppHubWorkloadCriticalityTypeLow = GCPAppHubWorkloadCriticalityTypeKey.String("LOW")
+)
+
+// Enum values for gcp.apphub.workload.environment_type
+var (
+ // Production environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeProduction = GCPAppHubWorkloadEnvironmentTypeKey.String("PRODUCTION")
+ // Staging environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeStaging = GCPAppHubWorkloadEnvironmentTypeKey.String("STAGING")
+ // Test environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeTest = GCPAppHubWorkloadEnvironmentTypeKey.String("TEST")
+ // Development environment.
+ // Stability: development
+ GCPAppHubWorkloadEnvironmentTypeDevelopment = GCPAppHubWorkloadEnvironmentTypeKey.String("DEVELOPMENT")
+)
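+
+// Usage sketch (illustrative, not part of the generated conventions): tagging
+// telemetry from a Cloud Run job with its execution, task index, and AppHub
+// environment; `span` is assumed to be a go.opentelemetry.io/otel/trace.Span
+// obtained by the caller.
+//
+//	span.SetAttributes(
+//		GCPCloudRunJobExecution("sample-job-mdw84"),
+//		GCPCloudRunJobTaskIndex(0),
+//		GCPAppHubServiceEnvironmentTypeProduction,
+//	)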
+
+// Namespace: gen_ai
+const (
+ // GenAIAgentDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.agent.description" semantic conventions. It represents the free-form
+ // description of the GenAI agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Helps with math problems", "Generates fiction stories"
+ GenAIAgentDescriptionKey = attribute.Key("gen_ai.agent.description")
+
+ // GenAIAgentIDKey is the attribute Key conforming to the "gen_ai.agent.id"
+ // semantic conventions. It represents the unique identifier of the GenAI agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "asst_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIAgentIDKey = attribute.Key("gen_ai.agent.id")
+
+ // GenAIAgentNameKey is the attribute Key conforming to the "gen_ai.agent.name"
+ // semantic conventions. It represents the human-readable name of the GenAI
+ // agent provided by the application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Math Tutor", "Fiction Writer"
+ GenAIAgentNameKey = attribute.Key("gen_ai.agent.name")
+
+ // GenAIConversationIDKey is the attribute Key conforming to the
+ // "gen_ai.conversation.id" semantic conventions. It represents the unique
+ // identifier for a conversation (session, thread), used to store and correlate
+ // messages within this conversation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "conv_5j66UpCpwteGg4YSxUnt7lPY"
+ GenAIConversationIDKey = attribute.Key("gen_ai.conversation.id")
+
+ // GenAIDataSourceIDKey is the attribute Key conforming to the
+ // "gen_ai.data_source.id" semantic conventions. It represents the data source
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "H7STPQYOND"
+ // Note: Data sources are used by AI agents and RAG applications to store
+ // grounding data. A data source may be an external database, object store,
+ // document collection, website, or any other storage system used by the GenAI
+ // agent or application. The `gen_ai.data_source.id` SHOULD match the identifier
+ // used by the GenAI system rather than a name specific to the external storage,
+ // such as a database or object store. Semantic conventions referencing
+ // `gen_ai.data_source.id` MAY also leverage additional attributes, such as
+ // `db.*`, to further identify and describe the data source.
+ GenAIDataSourceIDKey = attribute.Key("gen_ai.data_source.id")
+
+ // GenAIInputMessagesKey is the attribute Key conforming to the
+ // "gen_ai.input.messages" semantic conventions. It represents the chat history
+ // provided to the model as an input.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "role": "user",\n "parts": [\n {\n "type": "text",\n
+ // "content": "Weather in Paris?"\n }\n ]\n },\n {\n "role": "assistant",\n
+ // "parts": [\n {\n "type": "tool_call",\n "id":
+ // "call_VSPygqKTWdrhaFErNvMV18Yl",\n "name": "get_weather",\n "arguments": {\n
+ // "location": "Paris"\n }\n }\n ]\n },\n {\n "role": "tool",\n "parts": [\n {\n
+ // "type": "tool_call_response",\n "id": " call_VSPygqKTWdrhaFErNvMV18Yl",\n
+ // "result": "rainy, 57°F"\n }\n ]\n }\n]\n"
+ // Note: Instrumentations MUST follow [Input messages JSON schema].
+ // When the attribute is recorded on events, it MUST be recorded in structured
+ // form. When recorded on spans, it MAY be recorded as a JSON string if
+ // structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Messages MUST be provided in the order they were sent to the model.
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // input messages.
+ //
+ // > [!Warning]
+ // > This attribute is likely to contain sensitive information including
+ // > user/PII data.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [Input messages JSON schema]: /docs/gen-ai/gen-ai-input-messages.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAIInputMessagesKey = attribute.Key("gen_ai.input.messages")
+
+ // GenAIOperationNameKey is the attribute Key conforming to the
+ // "gen_ai.operation.name" semantic conventions. It represents the name of the
+ // operation being performed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+	// Note: If one of the predefined values applies, but the specific system uses
+	// a different name, it's RECOMMENDED to document it in the semantic
+	// conventions for that GenAI system and use the system-specific name in the
+	// instrumentation. If a different name is not documented, instrumentation
+	// libraries SHOULD use the applicable predefined value.
+ GenAIOperationNameKey = attribute.Key("gen_ai.operation.name")
+
+ // GenAIOutputMessagesKey is the attribute Key conforming to the
+ // "gen_ai.output.messages" semantic conventions. It represents the messages
+ // returned by the model where each message represents a specific model response
+ // (choice, candidate).
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "role": "assistant",\n "parts": [\n {\n "type": "text",\n
+ // "content": "The weather in Paris is currently rainy with a temperature of
+ // 57°F."\n }\n ],\n "finish_reason": "stop"\n }\n]\n"
+ // Note: Instrumentations MUST follow [Output messages JSON schema]
+ //
+ // Each message represents a single output choice/candidate generated by
+ // the model. Each message corresponds to exactly one generation
+ // (choice/candidate) and vice versa - one choice cannot be split across
+ // multiple messages or one message cannot contain parts from multiple choices.
+ //
+ // When the attribute is recorded on events, it MUST be recorded in structured
+ // form. When recorded on spans, it MAY be recorded as a JSON string if
+ // structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // output messages.
+ //
+ // > [!Warning]
+ // > This attribute is likely to contain sensitive information including
+ // > user/PII data.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [Output messages JSON schema]: /docs/gen-ai/gen-ai-output-messages.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAIOutputMessagesKey = attribute.Key("gen_ai.output.messages")
+
+ // GenAIOutputTypeKey is the attribute Key conforming to the
+ // "gen_ai.output.type" semantic conventions. It represents the represents the
+ // content type requested by the client.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute SHOULD be used when the client requests output of a
+ // specific type. The model may return zero or more outputs of this type.
+ // This attribute specifies the output modality and not the actual output
+ // format. For example, if an image is requested, the actual output could be a
+ // URL pointing to an image file.
+ // Additional output format details may be recorded in the future in the
+ // `gen_ai.output.{type}.*` attributes.
+ GenAIOutputTypeKey = attribute.Key("gen_ai.output.type")
+
+ // GenAIProviderNameKey is the attribute Key conforming to the
+ // "gen_ai.provider.name" semantic conventions. It represents the Generative AI
+ // provider as identified by the client or server instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The attribute SHOULD be set based on the instrumentation's best
+ // knowledge and may differ from the actual model provider.
+ //
+ // Multiple providers, including Azure OpenAI, Gemini, and AI hosting platforms
+ // are accessible using the OpenAI REST API and corresponding client libraries,
+ // but may proxy or host models from different providers.
+ //
+ // The `gen_ai.request.model`, `gen_ai.response.model`, and `server.address`
+ // attributes may help identify the actual system in use.
+ //
+ // The `gen_ai.provider.name` attribute acts as a discriminator that
+ // identifies the GenAI telemetry format flavor specific to that provider
+ // within GenAI semantic conventions.
+ // It SHOULD be set consistently with provider-specific attributes and signals.
+ // For example, GenAI spans, metrics, and events related to AWS Bedrock
+ // should have the `gen_ai.provider.name` set to `aws.bedrock` and include
+ // applicable `aws.bedrock.*` attributes and are not expected to include
+ // `openai.*` attributes.
+ GenAIProviderNameKey = attribute.Key("gen_ai.provider.name")
+
+ // GenAIRequestChoiceCountKey is the attribute Key conforming to the
+ // "gen_ai.request.choice.count" semantic conventions. It represents the target
+ // number of candidate completions to return.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3
+ GenAIRequestChoiceCountKey = attribute.Key("gen_ai.request.choice.count")
+
+ // GenAIRequestEncodingFormatsKey is the attribute Key conforming to the
+ // "gen_ai.request.encoding_formats" semantic conventions. It represents the
+ // encoding formats requested in an embeddings operation, if specified.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "base64"], ["float", "binary"
+ // Note: In some GenAI systems the encoding formats are called embedding types.
+ // Also, some GenAI systems only accept a single format per request.
+ GenAIRequestEncodingFormatsKey = attribute.Key("gen_ai.request.encoding_formats")
+
+ // GenAIRequestFrequencyPenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+ // frequency penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestFrequencyPenaltyKey = attribute.Key("gen_ai.request.frequency_penalty")
+
+ // GenAIRequestMaxTokensKey is the attribute Key conforming to the
+ // "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+ // number of tokens the model generates for a request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestMaxTokensKey = attribute.Key("gen_ai.request.max_tokens")
+
+ // GenAIRequestModelKey is the attribute Key conforming to the
+ // "gen_ai.request.model" semantic conventions. It represents the name of the
+ // GenAI model a request is being made to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: gpt-4
+ GenAIRequestModelKey = attribute.Key("gen_ai.request.model")
+
+ // GenAIRequestPresencePenaltyKey is the attribute Key conforming to the
+ // "gen_ai.request.presence_penalty" semantic conventions. It represents the
+ // presence penalty setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.1
+ GenAIRequestPresencePenaltyKey = attribute.Key("gen_ai.request.presence_penalty")
+
+ // GenAIRequestSeedKey is the attribute Key conforming to the
+ // "gen_ai.request.seed" semantic conventions. It represents the requests with
+ // same seed value more likely to return same result.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIRequestSeedKey = attribute.Key("gen_ai.request.seed")
+
+ // GenAIRequestStopSequencesKey is the attribute Key conforming to the
+ // "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+ // of sequences that the model will use to stop generating further tokens.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "forest", "lived"
+ GenAIRequestStopSequencesKey = attribute.Key("gen_ai.request.stop_sequences")
+
+ // GenAIRequestTemperatureKey is the attribute Key conforming to the
+ // "gen_ai.request.temperature" semantic conventions. It represents the
+ // temperature setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0.0
+ GenAIRequestTemperatureKey = attribute.Key("gen_ai.request.temperature")
+
+ // GenAIRequestTopKKey is the attribute Key conforming to the
+ // "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopKKey = attribute.Key("gen_ai.request.top_k")
+
+ // GenAIRequestTopPKey is the attribute Key conforming to the
+ // "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+ // setting for the GenAI request.
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1.0
+ GenAIRequestTopPKey = attribute.Key("gen_ai.request.top_p")
+
+ // GenAIResponseFinishReasonsKey is the attribute Key conforming to the
+ // "gen_ai.response.finish_reasons" semantic conventions. It represents the
+ // array of reasons the model stopped generating tokens, corresponding to each
+ // generation received.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "stop"], ["stop", "length"
+ GenAIResponseFinishReasonsKey = attribute.Key("gen_ai.response.finish_reasons")
+
+ // GenAIResponseIDKey is the attribute Key conforming to the
+ // "gen_ai.response.id" semantic conventions. It represents the unique
+ // identifier for the completion.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "chatcmpl-123"
+ GenAIResponseIDKey = attribute.Key("gen_ai.response.id")
+
+ // GenAIResponseModelKey is the attribute Key conforming to the
+ // "gen_ai.response.model" semantic conventions. It represents the name of the
+ // model that generated the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gpt-4-0613"
+ GenAIResponseModelKey = attribute.Key("gen_ai.response.model")
+
+ // GenAISystemInstructionsKey is the attribute Key conforming to the
+ // "gen_ai.system_instructions" semantic conventions. It represents the system
+ // message or instructions provided to the GenAI model separately from the chat
+ // history.
+ //
+ // Type: any
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "[\n {\n "type": "text",\n "content": "You are an Agent that greet
+ // users, always use greetings tool to respond"\n }\n]\n", "[\n {\n "type":
+ // "text",\n "content": "You are a language translator."\n },\n {\n "type":
+ // "text",\n "content": "Your mission is to translate text in English to
+ // French."\n }\n]\n"
+ // Note: This attribute SHOULD be used when the corresponding provider or API
+ // allows to provide system instructions or messages separately from the
+ // chat history.
+ //
+ // Instructions that are part of the chat history SHOULD be recorded in
+ // `gen_ai.input.messages` attribute instead.
+ //
+ // Instrumentations MUST follow [System instructions JSON schema].
+ //
+ // When recorded on spans, it MAY be recorded as a JSON string if structured
+ // format is not supported and SHOULD be recorded in structured form otherwise.
+ //
+ // Instrumentations MAY provide a way for users to filter or truncate
+ // system instructions.
+ //
+ // > [!Warning]
+ // > This attribute may contain sensitive information.
+ //
+ // See [Recording content on attributes]
+ // section for more details.
+ //
+ // [System instructions JSON schema]: /docs/gen-ai/gen-ai-system-instructions.json
+ // [Recording content on attributes]: /docs/gen-ai/gen-ai-spans.md#recording-content-on-attributes
+ GenAISystemInstructionsKey = attribute.Key("gen_ai.system_instructions")
+
+ // GenAITokenTypeKey is the attribute Key conforming to the "gen_ai.token.type"
+ // semantic conventions. It represents the type of token being counted.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "input", "output"
+ GenAITokenTypeKey = attribute.Key("gen_ai.token.type")
+
+ // GenAIToolCallIDKey is the attribute Key conforming to the
+ // "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+ // identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "call_mszuSIzqtI65i1wAUOE8w5H4"
+ GenAIToolCallIDKey = attribute.Key("gen_ai.tool.call.id")
+
+ // GenAIToolDescriptionKey is the attribute Key conforming to the
+ // "gen_ai.tool.description" semantic conventions. It represents the tool
+ // description.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Multiply two numbers"
+ GenAIToolDescriptionKey = attribute.Key("gen_ai.tool.description")
+
+ // GenAIToolNameKey is the attribute Key conforming to the "gen_ai.tool.name"
+ // semantic conventions. It represents the name of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Flights"
+ GenAIToolNameKey = attribute.Key("gen_ai.tool.name")
+
+ // GenAIToolTypeKey is the attribute Key conforming to the "gen_ai.tool.type"
+ // semantic conventions. It represents the type of the tool utilized by the
+ // agent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "function", "extension", "datastore"
+ // Note: Extension: A tool executed on the agent-side to directly call external
+ // APIs, bridging the gap between the agent and real-world systems.
+ // Agent-side operations involve actions that are performed by the agent on the
+ // server or within the agent's controlled environment.
+ // Function: A tool executed on the client-side, where the agent generates
+ // parameters for a predefined function, and the client executes the logic.
+ // Client-side operations are actions taken on the user's end or within the
+ // client application.
+ // Datastore: A tool used by the agent to access and query structured or
+ // unstructured external data for retrieval-augmented tasks or knowledge
+ // updates.
+ GenAIToolTypeKey = attribute.Key("gen_ai.tool.type")
+
+ // GenAIUsageInputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+ // tokens used in the GenAI input (prompt).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 100
+ GenAIUsageInputTokensKey = attribute.Key("gen_ai.usage.input_tokens")
+
+ // GenAIUsageOutputTokensKey is the attribute Key conforming to the
+ // "gen_ai.usage.output_tokens" semantic conventions. It represents the number
+ // of tokens used in the GenAI response (completion).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 180
+ GenAIUsageOutputTokensKey = attribute.Key("gen_ai.usage.output_tokens")
+)
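+
+// The declarations above follow the pattern used throughout this package: each
+// attribute gets a `...Key` constant, non-enum attributes additionally get a
+// convenience constructor (defined below), and enum attributes get predefined
+// `KeyValue` values. A minimal sketch of the two equivalent ways to build the
+// same attribute (the literal value is illustrative only):
+//
+//	kv1 := GenAIRequestModelKey.String("gpt-4") // via the Key constant
+//	kv2 := GenAIRequestModel("gpt-4")           // via the convenience constructor
+//	// kv1 and kv2 carry the same key and value.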
+
+// GenAIAgentDescription returns an attribute KeyValue conforming to the
+// "gen_ai.agent.description" semantic conventions. It represents the free-form
+// description of the GenAI agent provided by the application.
+func GenAIAgentDescription(val string) attribute.KeyValue {
+ return GenAIAgentDescriptionKey.String(val)
+}
+
+// GenAIAgentID returns an attribute KeyValue conforming to the "gen_ai.agent.id"
+// semantic conventions. It represents the unique identifier of the GenAI agent.
+func GenAIAgentID(val string) attribute.KeyValue {
+ return GenAIAgentIDKey.String(val)
+}
+
+// GenAIAgentName returns an attribute KeyValue conforming to the
+// "gen_ai.agent.name" semantic conventions. It represents the human-readable
+// name of the GenAI agent provided by the application.
+func GenAIAgentName(val string) attribute.KeyValue {
+ return GenAIAgentNameKey.String(val)
+}
+
+// GenAIConversationID returns an attribute KeyValue conforming to the
+// "gen_ai.conversation.id" semantic conventions. It represents the unique
+// identifier for a conversation (session, thread), used to store and correlate
+// messages within this conversation.
+func GenAIConversationID(val string) attribute.KeyValue {
+ return GenAIConversationIDKey.String(val)
+}
+
+// GenAIDataSourceID returns an attribute KeyValue conforming to the
+// "gen_ai.data_source.id" semantic conventions. It represents the data source
+// identifier.
+func GenAIDataSourceID(val string) attribute.KeyValue {
+ return GenAIDataSourceIDKey.String(val)
+}
+
+// GenAIRequestChoiceCount returns an attribute KeyValue conforming to the
+// "gen_ai.request.choice.count" semantic conventions. It represents the target
+// number of candidate completions to return.
+func GenAIRequestChoiceCount(val int) attribute.KeyValue {
+ return GenAIRequestChoiceCountKey.Int(val)
+}
+
+// GenAIRequestEncodingFormats returns an attribute KeyValue conforming to the
+// "gen_ai.request.encoding_formats" semantic conventions. It represents the
+// encoding formats requested in an embeddings operation, if specified.
+func GenAIRequestEncodingFormats(val ...string) attribute.KeyValue {
+ return GenAIRequestEncodingFormatsKey.StringSlice(val)
+}
+
+// GenAIRequestFrequencyPenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.frequency_penalty" semantic conventions. It represents the
+// frequency penalty setting for the GenAI request.
+func GenAIRequestFrequencyPenalty(val float64) attribute.KeyValue {
+ return GenAIRequestFrequencyPenaltyKey.Float64(val)
+}
+
+// GenAIRequestMaxTokens returns an attribute KeyValue conforming to the
+// "gen_ai.request.max_tokens" semantic conventions. It represents the maximum
+// number of tokens the model generates for a request.
+func GenAIRequestMaxTokens(val int) attribute.KeyValue {
+ return GenAIRequestMaxTokensKey.Int(val)
+}
+
+// GenAIRequestModel returns an attribute KeyValue conforming to the
+// "gen_ai.request.model" semantic conventions. It represents the name of the
+// GenAI model a request is being made to.
+func GenAIRequestModel(val string) attribute.KeyValue {
+ return GenAIRequestModelKey.String(val)
+}
+
+// GenAIRequestPresencePenalty returns an attribute KeyValue conforming to the
+// "gen_ai.request.presence_penalty" semantic conventions. It represents the
+// presence penalty setting for the GenAI request.
+func GenAIRequestPresencePenalty(val float64) attribute.KeyValue {
+ return GenAIRequestPresencePenaltyKey.Float64(val)
+}
+
+// GenAIRequestSeed returns an attribute KeyValue conforming to the
+// "gen_ai.request.seed" semantic conventions. It represents the requests with
+// same seed value more likely to return same result.
+func GenAIRequestSeed(val int) attribute.KeyValue {
+ return GenAIRequestSeedKey.Int(val)
+}
+
+// GenAIRequestStopSequences returns an attribute KeyValue conforming to the
+// "gen_ai.request.stop_sequences" semantic conventions. It represents the list
+// of sequences that the model will use to stop generating further tokens.
+func GenAIRequestStopSequences(val ...string) attribute.KeyValue {
+ return GenAIRequestStopSequencesKey.StringSlice(val)
+}
+
+// GenAIRequestTemperature returns an attribute KeyValue conforming to the
+// "gen_ai.request.temperature" semantic conventions. It represents the
+// temperature setting for the GenAI request.
+func GenAIRequestTemperature(val float64) attribute.KeyValue {
+ return GenAIRequestTemperatureKey.Float64(val)
+}
+
+// GenAIRequestTopK returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_k" semantic conventions. It represents the top_k sampling
+// setting for the GenAI request.
+func GenAIRequestTopK(val float64) attribute.KeyValue {
+ return GenAIRequestTopKKey.Float64(val)
+}
+
+// GenAIRequestTopP returns an attribute KeyValue conforming to the
+// "gen_ai.request.top_p" semantic conventions. It represents the top_p sampling
+// setting for the GenAI request.
+func GenAIRequestTopP(val float64) attribute.KeyValue {
+ return GenAIRequestTopPKey.Float64(val)
+}
+
+// GenAIResponseFinishReasons returns an attribute KeyValue conforming to the
+// "gen_ai.response.finish_reasons" semantic conventions. It represents the array
+// of reasons the model stopped generating tokens, corresponding to each
+// generation received.
+func GenAIResponseFinishReasons(val ...string) attribute.KeyValue {
+ return GenAIResponseFinishReasonsKey.StringSlice(val)
+}
+
+// GenAIResponseID returns an attribute KeyValue conforming to the
+// "gen_ai.response.id" semantic conventions. It represents the unique identifier
+// for the completion.
+func GenAIResponseID(val string) attribute.KeyValue {
+ return GenAIResponseIDKey.String(val)
+}
+
+// GenAIResponseModel returns an attribute KeyValue conforming to the
+// "gen_ai.response.model" semantic conventions. It represents the name of the
+// model that generated the response.
+func GenAIResponseModel(val string) attribute.KeyValue {
+ return GenAIResponseModelKey.String(val)
+}
+
+// GenAIToolCallID returns an attribute KeyValue conforming to the
+// "gen_ai.tool.call.id" semantic conventions. It represents the tool call
+// identifier.
+func GenAIToolCallID(val string) attribute.KeyValue {
+ return GenAIToolCallIDKey.String(val)
+}
+
+// GenAIToolDescription returns an attribute KeyValue conforming to the
+// "gen_ai.tool.description" semantic conventions. It represents the tool
+// description.
+func GenAIToolDescription(val string) attribute.KeyValue {
+ return GenAIToolDescriptionKey.String(val)
+}
+
+// GenAIToolName returns an attribute KeyValue conforming to the
+// "gen_ai.tool.name" semantic conventions. It represents the name of the tool
+// utilized by the agent.
+func GenAIToolName(val string) attribute.KeyValue {
+ return GenAIToolNameKey.String(val)
+}
+
+// GenAIToolType returns an attribute KeyValue conforming to the
+// "gen_ai.tool.type" semantic conventions. It represents the type of the tool
+// utilized by the agent.
+func GenAIToolType(val string) attribute.KeyValue {
+ return GenAIToolTypeKey.String(val)
+}
+
+// GenAIUsageInputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.input_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI input (prompt).
+func GenAIUsageInputTokens(val int) attribute.KeyValue {
+ return GenAIUsageInputTokensKey.Int(val)
+}
+
+// GenAIUsageOutputTokens returns an attribute KeyValue conforming to the
+// "gen_ai.usage.output_tokens" semantic conventions. It represents the number of
+// tokens used in the GenAI response (completion).
+func GenAIUsageOutputTokens(val int) attribute.KeyValue {
+ return GenAIUsageOutputTokensKey.Int(val)
+}
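+
+// Illustrative sketch (not part of the generated conventions): recording a chat
+// completion on a span using the helpers above. The `ctx` and `tracer`
+// variables and all literal values are assumptions for the example; attribute
+// choices follow the GenAI span conventions only loosely.
+//
+//	ctx, span := tracer.Start(ctx, "chat gpt-4")
+//	defer span.End()
+//	span.SetAttributes(
+//		GenAIOperationNameChat,
+//		GenAIProviderNameOpenAI,
+//		GenAIRequestModel("gpt-4"),
+//		GenAIRequestTemperature(0.0),
+//		GenAIResponseModel("gpt-4-0613"),
+//		GenAIResponseID("chatcmpl-123"),
+//		GenAIUsageInputTokens(100),
+//		GenAIUsageOutputTokens(180),
+//	)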
+
+// Enum values for gen_ai.operation.name
+var (
+ // Chat completion operation such as [OpenAI Chat API]
+ // Stability: development
+ //
+ // [OpenAI Chat API]: https://platform.openai.com/docs/api-reference/chat
+ GenAIOperationNameChat = GenAIOperationNameKey.String("chat")
+ // Multimodal content generation operation such as [Gemini Generate Content]
+ // Stability: development
+ //
+ // [Gemini Generate Content]: https://ai.google.dev/api/generate-content
+ GenAIOperationNameGenerateContent = GenAIOperationNameKey.String("generate_content")
+ // Text completions operation such as [OpenAI Completions API (Legacy)]
+ // Stability: development
+ //
+ // [OpenAI Completions API (Legacy)]: https://platform.openai.com/docs/api-reference/completions
+ GenAIOperationNameTextCompletion = GenAIOperationNameKey.String("text_completion")
+ // Embeddings operation such as [OpenAI Create embeddings API]
+ // Stability: development
+ //
+ // [OpenAI Create embeddings API]: https://platform.openai.com/docs/api-reference/embeddings/create
+ GenAIOperationNameEmbeddings = GenAIOperationNameKey.String("embeddings")
+ // Create GenAI agent
+ // Stability: development
+ GenAIOperationNameCreateAgent = GenAIOperationNameKey.String("create_agent")
+ // Invoke GenAI agent
+ // Stability: development
+ GenAIOperationNameInvokeAgent = GenAIOperationNameKey.String("invoke_agent")
+ // Execute a tool
+ // Stability: development
+ GenAIOperationNameExecuteTool = GenAIOperationNameKey.String("execute_tool")
+)
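+
+// A short sketch of how the predefined operation names above are typically
+// applied (the span is an assumption for illustration; a system-specific
+// operation name that is not covered here would be set through the Key
+// constant instead):
+//
+//	span.SetAttributes(GenAIOperationNameEmbeddings)            // predefined value
+//	span.SetAttributes(GenAIOperationNameKey.String("rerank"))  // hypothetical system-specific name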
+
+// Enum values for gen_ai.output.type
+var (
+ // Plain text
+ // Stability: development
+ GenAIOutputTypeText = GenAIOutputTypeKey.String("text")
+ // JSON object with known or unknown schema
+ // Stability: development
+ GenAIOutputTypeJSON = GenAIOutputTypeKey.String("json")
+ // Image
+ // Stability: development
+ GenAIOutputTypeImage = GenAIOutputTypeKey.String("image")
+ // Speech
+ // Stability: development
+ GenAIOutputTypeSpeech = GenAIOutputTypeKey.String("speech")
+)
+
+// Enum values for gen_ai.provider.name
+var (
+ // [OpenAI]
+ // Stability: development
+ //
+ // [OpenAI]: https://openai.com/
+ GenAIProviderNameOpenAI = GenAIProviderNameKey.String("openai")
+ // Any Google generative AI endpoint
+ // Stability: development
+ GenAIProviderNameGCPGenAI = GenAIProviderNameKey.String("gcp.gen_ai")
+ // [Vertex AI]
+ // Stability: development
+ //
+ // [Vertex AI]: https://cloud.google.com/vertex-ai
+ GenAIProviderNameGCPVertexAI = GenAIProviderNameKey.String("gcp.vertex_ai")
+ // [Gemini]
+ // Stability: development
+ //
+ // [Gemini]: https://cloud.google.com/products/gemini
+ GenAIProviderNameGCPGemini = GenAIProviderNameKey.String("gcp.gemini")
+ // [Anthropic]
+ // Stability: development
+ //
+ // [Anthropic]: https://www.anthropic.com/
+ GenAIProviderNameAnthropic = GenAIProviderNameKey.String("anthropic")
+ // [Cohere]
+ // Stability: development
+ //
+ // [Cohere]: https://cohere.com/
+ GenAIProviderNameCohere = GenAIProviderNameKey.String("cohere")
+ // Azure AI Inference
+ // Stability: development
+ GenAIProviderNameAzureAIInference = GenAIProviderNameKey.String("azure.ai.inference")
+ // [Azure OpenAI]
+ // Stability: development
+ //
+ // [Azure OpenAI]: https://azure.microsoft.com/products/ai-services/openai-service/
+ GenAIProviderNameAzureAIOpenAI = GenAIProviderNameKey.String("azure.ai.openai")
+ // [IBM Watsonx AI]
+ // Stability: development
+ //
+ // [IBM Watsonx AI]: https://www.ibm.com/products/watsonx-ai
+ GenAIProviderNameIBMWatsonxAI = GenAIProviderNameKey.String("ibm.watsonx.ai")
+ // [AWS Bedrock]
+ // Stability: development
+ //
+ // [AWS Bedrock]: https://aws.amazon.com/bedrock
+ GenAIProviderNameAWSBedrock = GenAIProviderNameKey.String("aws.bedrock")
+ // [Perplexity]
+ // Stability: development
+ //
+ // [Perplexity]: https://www.perplexity.ai/
+ GenAIProviderNamePerplexity = GenAIProviderNameKey.String("perplexity")
+ // [xAI]
+ // Stability: development
+ //
+ // [xAI]: https://x.ai/
+ GenAIProviderNameXAI = GenAIProviderNameKey.String("x_ai")
+ // [DeepSeek]
+ // Stability: development
+ //
+ // [DeepSeek]: https://www.deepseek.com/
+ GenAIProviderNameDeepseek = GenAIProviderNameKey.String("deepseek")
+ // [Groq]
+ // Stability: development
+ //
+ // [Groq]: https://groq.com/
+ GenAIProviderNameGroq = GenAIProviderNameKey.String("groq")
+ // [Mistral AI]
+ // Stability: development
+ //
+ // [Mistral AI]: https://mistral.ai/
+ GenAIProviderNameMistralAI = GenAIProviderNameKey.String("mistral_ai")
+)
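+
+// Sketch of the discriminator pattern described in the gen_ai.provider.name
+// note: when an OpenAI-compatible endpoint actually fronts Azure OpenAI, the
+// provider name identifies the telemetry flavor while the model attributes
+// identify the actual system in use (values are illustrative only):
+//
+//	span.SetAttributes(
+//		GenAIProviderNameAzureAIOpenAI,
+//		GenAIRequestModel("gpt-4"),
+//		GenAIResponseModel("gpt-4-0613"),
+//	)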
+
+// Enum values for gen_ai.token.type
+var (
+ // Input tokens (prompt, input, etc.)
+ // Stability: development
+ GenAITokenTypeInput = GenAITokenTypeKey.String("input")
+ // Output tokens (completion, response, etc.)
+ // Stability: development
+ GenAITokenTypeOutput = GenAITokenTypeKey.String("output")
+)
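+
+// Sketch of splitting a token-usage measurement by gen_ai.token.type, assuming
+// an int64 histogram `tokenUsage` created from the OpenTelemetry metric API
+// (go.opentelemetry.io/otel/metric); the instrument and values are assumptions
+// for the example:
+//
+//	tokenUsage.Record(ctx, 100, metric.WithAttributes(GenAITokenTypeInput))
+//	tokenUsage.Record(ctx, 180, metric.WithAttributes(GenAITokenTypeOutput))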
+
+// Namespace: geo
+const (
+ // GeoContinentCodeKey is the attribute Key conforming to the
+ // "geo.continent.code" semantic conventions. It represents the two-letter code
+	// representing the continent’s name.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ GeoContinentCodeKey = attribute.Key("geo.continent.code")
+
+ // GeoCountryISOCodeKey is the attribute Key conforming to the
+ // "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+ // Country Code ([ISO 3166-1 alpha2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA"
+ //
+ // [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+ GeoCountryISOCodeKey = attribute.Key("geo.country.iso_code")
+
+ // GeoLocalityNameKey is the attribute Key conforming to the "geo.locality.name"
+ // semantic conventions. It represents the locality name. Represents the name of
+ // a city, town, village, or similar populated place.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Montreal", "Berlin"
+ GeoLocalityNameKey = attribute.Key("geo.locality.name")
+
+ // GeoLocationLatKey is the attribute Key conforming to the "geo.location.lat"
+ // semantic conventions. It represents the latitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 45.505918
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLatKey = attribute.Key("geo.location.lat")
+
+ // GeoLocationLonKey is the attribute Key conforming to the "geo.location.lon"
+ // semantic conventions. It represents the longitude of the geo location in
+ // [WGS84].
+ //
+ // Type: double
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -73.61483
+ //
+ // [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+ GeoLocationLonKey = attribute.Key("geo.location.lon")
+
+ // GeoPostalCodeKey is the attribute Key conforming to the "geo.postal_code"
+ // semantic conventions. It represents the postal code associated with the
+ // location. Values appropriate for this field may also be known as a postcode
+ // or ZIP code and will vary widely from country to country.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "94040"
+ GeoPostalCodeKey = attribute.Key("geo.postal_code")
+
+ // GeoRegionISOCodeKey is the attribute Key conforming to the
+ // "geo.region.iso_code" semantic conventions. It represents the region ISO code
+ // ([ISO 3166-2]).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CA-QC"
+ //
+ // [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+ GeoRegionISOCodeKey = attribute.Key("geo.region.iso_code")
+)
+
+// GeoCountryISOCode returns an attribute KeyValue conforming to the
+// "geo.country.iso_code" semantic conventions. It represents the two-letter ISO
+// Country Code ([ISO 3166-1 alpha2]).
+//
+// [ISO 3166-1 alpha2]: https://wikipedia.org/wiki/ISO_3166-1#Codes
+func GeoCountryISOCode(val string) attribute.KeyValue {
+ return GeoCountryISOCodeKey.String(val)
+}
+
+// GeoLocalityName returns an attribute KeyValue conforming to the
+// "geo.locality.name" semantic conventions. It represents the locality name.
+// Represents the name of a city, town, village, or similar populated place.
+func GeoLocalityName(val string) attribute.KeyValue {
+ return GeoLocalityNameKey.String(val)
+}
+
+// GeoLocationLat returns an attribute KeyValue conforming to the
+// "geo.location.lat" semantic conventions. It represents the latitude of the geo
+// location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLat(val float64) attribute.KeyValue {
+ return GeoLocationLatKey.Float64(val)
+}
+
+// GeoLocationLon returns an attribute KeyValue conforming to the
+// "geo.location.lon" semantic conventions. It represents the longitude of the
+// geo location in [WGS84].
+//
+// [WGS84]: https://wikipedia.org/wiki/World_Geodetic_System#WGS84
+func GeoLocationLon(val float64) attribute.KeyValue {
+ return GeoLocationLonKey.Float64(val)
+}
+
+// GeoPostalCode returns an attribute KeyValue conforming to the
+// "geo.postal_code" semantic conventions. It represents the postal code
+// associated with the location. Values appropriate for this field may also be
+// known as a postcode or ZIP code and will vary widely from country to country.
+func GeoPostalCode(val string) attribute.KeyValue {
+ return GeoPostalCodeKey.String(val)
+}
+
+// GeoRegionISOCode returns an attribute KeyValue conforming to the
+// "geo.region.iso_code" semantic conventions. It represents the region ISO code
+// ([ISO 3166-2]).
+//
+// [ISO 3166-2]: https://wikipedia.org/wiki/ISO_3166-2
+func GeoRegionISOCode(val string) attribute.KeyValue {
+ return GeoRegionISOCodeKey.String(val)
+}
+
+// Enum values for geo.continent.code
+var (
+ // Africa
+ // Stability: development
+ GeoContinentCodeAf = GeoContinentCodeKey.String("AF")
+ // Antarctica
+ // Stability: development
+ GeoContinentCodeAn = GeoContinentCodeKey.String("AN")
+ // Asia
+ // Stability: development
+ GeoContinentCodeAs = GeoContinentCodeKey.String("AS")
+ // Europe
+ // Stability: development
+ GeoContinentCodeEu = GeoContinentCodeKey.String("EU")
+ // North America
+ // Stability: development
+ GeoContinentCodeNa = GeoContinentCodeKey.String("NA")
+ // Oceania
+ // Stability: development
+ GeoContinentCodeOc = GeoContinentCodeKey.String("OC")
+ // South America
+ // Stability: development
+ GeoContinentCodeSa = GeoContinentCodeKey.String("SA")
+)
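+
+// Illustrative sketch of attaching the geo attributes above to a span; the
+// span is an assumption and the values are taken from the attribute examples:
+//
+//	span.SetAttributes(
+//		GeoContinentCodeNa,
+//		GeoCountryISOCode("CA"),
+//		GeoRegionISOCode("CA-QC"),
+//		GeoLocalityName("Montreal"),
+//		GeoLocationLat(45.505918),
+//		GeoLocationLon(-73.61483),
+//	)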
+
+// Namespace: go
+const (
+ // GoMemoryTypeKey is the attribute Key conforming to the "go.memory.type"
+ // semantic conventions. It represents the type of memory.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "other", "stack"
+ GoMemoryTypeKey = attribute.Key("go.memory.type")
+)
+
+// Enum values for go.memory.type
+var (
+ // Memory allocated from the heap that is reserved for stack space, whether or
+ // not it is currently in-use.
+ // Stability: development
+ GoMemoryTypeStack = GoMemoryTypeKey.String("stack")
+ // Memory used by the Go runtime, excluding other categories of memory usage
+ // described in this enumeration.
+ // Stability: development
+ GoMemoryTypeOther = GoMemoryTypeKey.String("other")
+)
+
+// Namespace: graphql
+const (
+ // GraphQLDocumentKey is the attribute Key conforming to the "graphql.document"
+ // semantic conventions. It represents the GraphQL document being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: query findBookById { bookById(id: ?) { name } }
+ // Note: The value may be sanitized to exclude sensitive information.
+ GraphQLDocumentKey = attribute.Key("graphql.document")
+
+ // GraphQLOperationNameKey is the attribute Key conforming to the
+ // "graphql.operation.name" semantic conventions. It represents the name of the
+ // operation being executed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: findBookById
+ GraphQLOperationNameKey = attribute.Key("graphql.operation.name")
+
+ // GraphQLOperationTypeKey is the attribute Key conforming to the
+ // "graphql.operation.type" semantic conventions. It represents the type of the
+ // operation being executed.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "query", "mutation", "subscription"
+ GraphQLOperationTypeKey = attribute.Key("graphql.operation.type")
+)
+
+// GraphQLDocument returns an attribute KeyValue conforming to the
+// "graphql.document" semantic conventions. It represents the GraphQL document
+// being executed.
+func GraphQLDocument(val string) attribute.KeyValue {
+ return GraphQLDocumentKey.String(val)
+}
+
+// GraphQLOperationName returns an attribute KeyValue conforming to the
+// "graphql.operation.name" semantic conventions. It represents the name of the
+// operation being executed.
+func GraphQLOperationName(val string) attribute.KeyValue {
+ return GraphQLOperationNameKey.String(val)
+}
+
+// Enum values for graphql.operation.type
+var (
+ // GraphQL query
+ // Stability: development
+ GraphQLOperationTypeQuery = GraphQLOperationTypeKey.String("query")
+ // GraphQL mutation
+ // Stability: development
+ GraphQLOperationTypeMutation = GraphQLOperationTypeKey.String("mutation")
+ // GraphQL subscription
+ // Stability: development
+ GraphQLOperationTypeSubscription = GraphQLOperationTypeKey.String("subscription")
+)
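+
+// Sketch of annotating a GraphQL execution span with the attributes above
+// (the span itself is an assumption; the document and operation values are
+// taken from the examples in the attribute documentation):
+//
+//	span.SetAttributes(
+//		GraphQLOperationTypeQuery,
+//		GraphQLOperationName("findBookById"),
+//		GraphQLDocument("query findBookById { bookById(id: ?) { name } }"),
+//	)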
+
+// Namespace: heroku
+const (
+ // HerokuAppIDKey is the attribute Key conforming to the "heroku.app.id"
+ // semantic conventions. It represents the unique identifier for the
+ // application.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2daa2797-e42b-4624-9322-ec3f968df4da"
+ HerokuAppIDKey = attribute.Key("heroku.app.id")
+
+ // HerokuReleaseCommitKey is the attribute Key conforming to the
+ // "heroku.release.commit" semantic conventions. It represents the commit hash
+ // for the current release.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "e6134959463efd8966b20e75b913cafe3f5ec"
+ HerokuReleaseCommitKey = attribute.Key("heroku.release.commit")
+
+ // HerokuReleaseCreationTimestampKey is the attribute Key conforming to the
+ // "heroku.release.creation_timestamp" semantic conventions. It represents the
+ // time and date the release was created.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2022-10-23T18:00:42Z"
+ HerokuReleaseCreationTimestampKey = attribute.Key("heroku.release.creation_timestamp")
+)
+
+// HerokuAppID returns an attribute KeyValue conforming to the "heroku.app.id"
+// semantic conventions. It represents the unique identifier for the application.
+func HerokuAppID(val string) attribute.KeyValue {
+ return HerokuAppIDKey.String(val)
+}
+
+// HerokuReleaseCommit returns an attribute KeyValue conforming to the
+// "heroku.release.commit" semantic conventions. It represents the commit hash
+// for the current release.
+func HerokuReleaseCommit(val string) attribute.KeyValue {
+ return HerokuReleaseCommitKey.String(val)
+}
+
+// HerokuReleaseCreationTimestamp returns an attribute KeyValue conforming to the
+// "heroku.release.creation_timestamp" semantic conventions. It represents the
+// time and date the release was created.
+func HerokuReleaseCreationTimestamp(val string) attribute.KeyValue {
+ return HerokuReleaseCreationTimestampKey.String(val)
+}
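+
+// Sketch of describing a Heroku dyno as resource attributes, assuming the
+// OpenTelemetry SDK resource package (go.opentelemetry.io/otel/sdk/resource);
+// the values are illustrative, taken from the attribute examples above:
+//
+//	res := resource.NewSchemaless(
+//		HerokuAppID("2daa2797-e42b-4624-9322-ec3f968df4da"),
+//		HerokuReleaseCommit("e6134959463efd8966b20e75b913cafe3f5ec"),
+//		HerokuReleaseCreationTimestamp("2022-10-23T18:00:42Z"),
+//	)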
+
+// Namespace: host
+const (
+ // HostArchKey is the attribute Key conforming to the "host.arch" semantic
+ // conventions. It represents the CPU architecture the host system is running
+ // on.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HostArchKey = attribute.Key("host.arch")
+
+ // HostCPUCacheL2SizeKey is the attribute Key conforming to the
+ // "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+ // level 2 memory cache available to the processor (in Bytes).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12288000
+ HostCPUCacheL2SizeKey = attribute.Key("host.cpu.cache.l2.size")
+
+ // HostCPUFamilyKey is the attribute Key conforming to the "host.cpu.family"
+ // semantic conventions. It represents the family or generation of the CPU.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "PA-RISC 1.1e"
+ HostCPUFamilyKey = attribute.Key("host.cpu.family")
+
+ // HostCPUModelIDKey is the attribute Key conforming to the "host.cpu.model.id"
+ // semantic conventions. It represents the model identifier. It provides more
+ // granular information about the CPU, distinguishing it from other CPUs within
+ // the same family.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "6", "9000/778/B180L"
+ HostCPUModelIDKey = attribute.Key("host.cpu.model.id")
+
+ // HostCPUModelNameKey is the attribute Key conforming to the
+ // "host.cpu.model.name" semantic conventions. It represents the model
+ // designation of the processor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz"
+ HostCPUModelNameKey = attribute.Key("host.cpu.model.name")
+
+ // HostCPUSteppingKey is the attribute Key conforming to the "host.cpu.stepping"
+ // semantic conventions. It represents the stepping or core revisions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1", "r1p1"
+ HostCPUSteppingKey = attribute.Key("host.cpu.stepping")
+
+ // HostCPUVendorIDKey is the attribute Key conforming to the
+ // "host.cpu.vendor.id" semantic conventions. It represents the processor
+ // manufacturer identifier. A maximum 12-character string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "GenuineIntel"
+ // Note: [CPUID] command returns the vendor ID string in EBX, EDX and ECX
+ // registers. Writing these to memory in this order results in a 12-character
+ // string.
+ //
+ // [CPUID]: https://wiki.osdev.org/CPUID
+ HostCPUVendorIDKey = attribute.Key("host.cpu.vendor.id")
+
+ // HostIDKey is the attribute Key conforming to the "host.id" semantic
+ // conventions. It represents the unique host ID. For Cloud, this must be the
+ // instance_id assigned by the cloud provider. For non-containerized systems,
+ // this should be the `machine-id`. See the table below for the sources to use
+ // to determine the `machine-id` based on operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fdbf79e8af94cb7f9e8df36789187052"
+ HostIDKey = attribute.Key("host.id")
+
+ // HostImageIDKey is the attribute Key conforming to the "host.image.id"
+ // semantic conventions. It represents the VM image ID or host OS image ID. For
+ // Cloud, this value is from the provider.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ami-07b06b442921831e5"
+ HostImageIDKey = attribute.Key("host.image.id")
+
+ // HostImageNameKey is the attribute Key conforming to the "host.image.name"
+ // semantic conventions. It represents the name of the VM image or OS install
+ // the host was instantiated from.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "infra-ami-eks-worker-node-7d4ec78312", "CentOS-8-x86_64-1905"
+ HostImageNameKey = attribute.Key("host.image.name")
+
+ // HostImageVersionKey is the attribute Key conforming to the
+ // "host.image.version" semantic conventions. It represents the version string
+ // of the VM image or host OS as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ HostImageVersionKey = attribute.Key("host.image.version")
+
+ // HostIPKey is the attribute Key conforming to the "host.ip" semantic
+ // conventions. It represents the available IP addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "192.168.1.140", "fe80::abc2:4a28:737a:609e"
+ // Note: IPv4 Addresses MUST be specified in dotted-quad notation. IPv6
+ // addresses MUST be specified in the [RFC 5952] format.
+ //
+ // [RFC 5952]: https://www.rfc-editor.org/rfc/rfc5952.html
+ HostIPKey = attribute.Key("host.ip")
+
+ // HostMacKey is the attribute Key conforming to the "host.mac" semantic
+ // conventions. It represents the available MAC addresses of the host, excluding
+ // loopback interfaces.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "AC-DE-48-23-45-67", "AC-DE-48-23-45-67-01-9F"
+ // Note: MAC Addresses MUST be represented in [IEEE RA hexadecimal form]: as
+ // hyphen-separated octets in uppercase hexadecimal form from most to least
+ // significant.
+ //
+ // [IEEE RA hexadecimal form]: https://standards.ieee.org/wp-content/uploads/import/documents/tutorials/eui.pdf
+ HostMacKey = attribute.Key("host.mac")
+
+ // HostNameKey is the attribute Key conforming to the "host.name" semantic
+ // conventions. It represents the name of the host. On Unix systems, it may
+ // contain what the hostname command returns, or the fully qualified hostname,
+ // or another name specified by the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-test"
+ HostNameKey = attribute.Key("host.name")
+
+ // HostTypeKey is the attribute Key conforming to the "host.type" semantic
+ // conventions. It represents the type of host. For Cloud, this must be the
+ // machine type.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "n1-standard-1"
+ HostTypeKey = attribute.Key("host.type")
+)
+
+// HostCPUCacheL2Size returns an attribute KeyValue conforming to the
+// "host.cpu.cache.l2.size" semantic conventions. It represents the amount of
+// level 2 memory cache available to the processor (in Bytes).
+func HostCPUCacheL2Size(val int) attribute.KeyValue {
+ return HostCPUCacheL2SizeKey.Int(val)
+}
+
+// HostCPUFamily returns an attribute KeyValue conforming to the
+// "host.cpu.family" semantic conventions. It represents the family or generation
+// of the CPU.
+func HostCPUFamily(val string) attribute.KeyValue {
+ return HostCPUFamilyKey.String(val)
+}
+
+// HostCPUModelID returns an attribute KeyValue conforming to the
+// "host.cpu.model.id" semantic conventions. It represents the model identifier.
+// It provides more granular information about the CPU, distinguishing it from
+// other CPUs within the same family.
+func HostCPUModelID(val string) attribute.KeyValue {
+ return HostCPUModelIDKey.String(val)
+}
+
+// HostCPUModelName returns an attribute KeyValue conforming to the
+// "host.cpu.model.name" semantic conventions. It represents the model
+// designation of the processor.
+func HostCPUModelName(val string) attribute.KeyValue {
+ return HostCPUModelNameKey.String(val)
+}
+
+// HostCPUStepping returns an attribute KeyValue conforming to the
+// "host.cpu.stepping" semantic conventions. It represents the stepping or core
+// revisions.
+func HostCPUStepping(val string) attribute.KeyValue {
+ return HostCPUSteppingKey.String(val)
+}
+
+// HostCPUVendorID returns an attribute KeyValue conforming to the
+// "host.cpu.vendor.id" semantic conventions. It represents the processor
+// manufacturer identifier. A maximum 12-character string.
+func HostCPUVendorID(val string) attribute.KeyValue {
+ return HostCPUVendorIDKey.String(val)
+}
+
+// HostID returns an attribute KeyValue conforming to the "host.id" semantic
+// conventions. It represents the unique host ID. For Cloud, this must be the
+// instance_id assigned by the cloud provider. For non-containerized systems,
+// this should be the `machine-id`. See the table below for the sources to use to
+// determine the `machine-id` based on operating system.
+func HostID(val string) attribute.KeyValue {
+ return HostIDKey.String(val)
+}
+
+// HostImageID returns an attribute KeyValue conforming to the "host.image.id"
+// semantic conventions. It represents the VM image ID or host OS image ID. For
+// Cloud, this value is from the provider.
+func HostImageID(val string) attribute.KeyValue {
+ return HostImageIDKey.String(val)
+}
+
+// HostImageName returns an attribute KeyValue conforming to the
+// "host.image.name" semantic conventions. It represents the name of the VM image
+// or OS install the host was instantiated from.
+func HostImageName(val string) attribute.KeyValue {
+ return HostImageNameKey.String(val)
+}
+
+// HostImageVersion returns an attribute KeyValue conforming to the
+// "host.image.version" semantic conventions. It represents the version string of
+// the VM image or host OS as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func HostImageVersion(val string) attribute.KeyValue {
+ return HostImageVersionKey.String(val)
+}
+
+// HostIP returns an attribute KeyValue conforming to the "host.ip" semantic
+// conventions. It represents the available IP addresses of the host, excluding
+// loopback interfaces.
+func HostIP(val ...string) attribute.KeyValue {
+ return HostIPKey.StringSlice(val)
+}
+
+// HostMac returns an attribute KeyValue conforming to the "host.mac" semantic
+// conventions. It represents the available MAC addresses of the host, excluding
+// loopback interfaces.
+func HostMac(val ...string) attribute.KeyValue {
+ return HostMacKey.StringSlice(val)
+}
+
+// HostName returns an attribute KeyValue conforming to the "host.name" semantic
+// conventions. It represents the name of the host. On Unix systems, it may
+// contain what the hostname command returns, or the fully qualified hostname, or
+// another name specified by the user.
+func HostName(val string) attribute.KeyValue {
+ return HostNameKey.String(val)
+}
+
+// HostType returns an attribute KeyValue conforming to the "host.type" semantic
+// conventions. It represents the type of host. For Cloud, this must be the
+// machine type.
+func HostType(val string) attribute.KeyValue {
+ return HostTypeKey.String(val)
+}
+
+// Enum values for host.arch
+var (
+ // AMD64
+ // Stability: development
+ HostArchAMD64 = HostArchKey.String("amd64")
+ // ARM32
+ // Stability: development
+ HostArchARM32 = HostArchKey.String("arm32")
+ // ARM64
+ // Stability: development
+ HostArchARM64 = HostArchKey.String("arm64")
+ // Itanium
+ // Stability: development
+ HostArchIA64 = HostArchKey.String("ia64")
+ // 32-bit PowerPC
+ // Stability: development
+ HostArchPPC32 = HostArchKey.String("ppc32")
+ // 64-bit PowerPC
+ // Stability: development
+ HostArchPPC64 = HostArchKey.String("ppc64")
+ // IBM z/Architecture
+ // Stability: development
+ HostArchS390x = HostArchKey.String("s390x")
+ // 32-bit x86
+ // Stability: development
+ HostArchX86 = HostArchKey.String("x86")
+)
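+
+// Sketch of combining the host attributes above into a resource; the SDK
+// resource package is an assumption and the values are taken from the
+// attribute examples:
+//
+//	res := resource.NewSchemaless(
+//		HostName("opentelemetry-test"),
+//		HostArchAMD64,
+//		HostID("fdbf79e8af94cb7f9e8df36789187052"),
+//		HostType("n1-standard-1"),
+//	)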
+
+// Namespace: http
+const (
+ // HTTPConnectionStateKey is the attribute Key conforming to the
+ // "http.connection.state" semantic conventions. It represents the state of the
+ // HTTP connection in the HTTP connection pool.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "idle"
+ HTTPConnectionStateKey = attribute.Key("http.connection.state")
+
+ // HTTPRequestBodySizeKey is the attribute Key conforming to the
+ // "http.request.body.size" semantic conventions. It represents the size of the
+ // request payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+ // [Content-Length] header. For requests using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPRequestBodySizeKey = attribute.Key("http.request.body.size")
+
+ // HTTPRequestMethodKey is the attribute Key conforming to the
+ // "http.request.method" semantic conventions. It represents the HTTP request
+ // method.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GET", "POST", "HEAD"
+ // Note: HTTP request method value SHOULD be "known" to the instrumentation.
+ // By default, this convention defines "known" methods as the ones listed in
+ // [RFC9110]
+ // and the PATCH method defined in [RFC5789].
+ //
+ // If the HTTP request method is not known to instrumentation, it MUST set the
+ // `http.request.method` attribute to `_OTHER`.
+ //
+ // If the HTTP instrumentation could end up converting valid HTTP request
+ // methods to `_OTHER`, then it MUST provide a way to override
+ // the list of known HTTP methods. If this override is done via environment
+ // variable, then the environment variable MUST be named
+ // OTEL_INSTRUMENTATION_HTTP_KNOWN_METHODS and support a comma-separated list of
+ // case-sensitive known HTTP methods
+ // (this list MUST be a full override of the default known method, it is not a
+ // list of known methods in addition to the defaults).
+ //
+ // HTTP method names are case-sensitive and `http.request.method` attribute
+ // value MUST match a known HTTP method name exactly.
+ // Instrumentations for specific web frameworks that consider HTTP methods to be
+ // case insensitive, SHOULD populate a canonical equivalent.
+ // Tracing instrumentations that do so, MUST also set
+ // `http.request.method_original` to the original value.
+ //
+ // [RFC9110]: https://www.rfc-editor.org/rfc/rfc9110.html#name-methods
+ // [RFC5789]: https://www.rfc-editor.org/rfc/rfc5789.html
+ HTTPRequestMethodKey = attribute.Key("http.request.method")
+
+ // HTTPRequestMethodOriginalKey is the attribute Key conforming to the
+ // "http.request.method_original" semantic conventions. It represents the
+ // original HTTP method sent by the client in the request line.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "GeT", "ACL", "foo"
+ HTTPRequestMethodOriginalKey = attribute.Key("http.request.method_original")
+
+ // HTTPRequestResendCountKey is the attribute Key conforming to the
+ // "http.request.resend_count" semantic conventions. It represents the ordinal
+ // number of request resending attempt (for any reason, including redirects).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Note: The resend count SHOULD be updated each time an HTTP request gets
+ // resent by the client, regardless of what was the cause of the resending (e.g.
+ // redirection, authorization failure, 503 Server Unavailable, network issues,
+ // or any other).
+ HTTPRequestResendCountKey = attribute.Key("http.request.resend_count")
+
+ // HTTPRequestSizeKey is the attribute Key conforming to the "http.request.size"
+ // semantic conventions. It represents the total size of the request in bytes.
+ // This should be the total number of bytes sent over the wire, including the
+ // request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers, and request
+ // body if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPRequestSizeKey = attribute.Key("http.request.size")
+
+ // HTTPResponseBodySizeKey is the attribute Key conforming to the
+ // "http.response.body.size" semantic conventions. It represents the size of the
+ // response payload body in bytes. This is the number of bytes transferred
+ // excluding headers and is often, but not always, present as the
+	// [Content-Length] header. For responses using transport encoding, this should
+ // be the compressed size.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+ HTTPResponseBodySizeKey = attribute.Key("http.response.body.size")
+
+ // HTTPResponseSizeKey is the attribute Key conforming to the
+ // "http.response.size" semantic conventions. It represents the total size of
+ // the response in bytes. This should be the total number of bytes sent over the
+ // wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+ // headers, and response body and trailers if any.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ HTTPResponseSizeKey = attribute.Key("http.response.size")
+
+ // HTTPResponseStatusCodeKey is the attribute Key conforming to the
+ // "http.response.status_code" semantic conventions. It represents the
+ // [HTTP response status code].
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 200
+ //
+ // [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+ HTTPResponseStatusCodeKey = attribute.Key("http.response.status_code")
+
+ // HTTPRouteKey is the attribute Key conforming to the "http.route" semantic
+ // conventions. It represents the matched route, that is, the path template in
+ // the format used by the respective server framework.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/users/:userID?", "{controller}/{action}/{id?}"
+ // Note: MUST NOT be populated when this is not supported by the HTTP server
+	// framework, as the route attribute should have low cardinality and the URI
+	// path cannot substitute for it.
+ // SHOULD include the [application root] if there is one.
+ //
+ // [application root]: /docs/http/http-spans.md#http-server-definitions
+ HTTPRouteKey = attribute.Key("http.route")
+)
+
+// HTTPRequestBodySize returns an attribute KeyValue conforming to the
+// "http.request.body.size" semantic conventions. It represents the size of the
+// request payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For requests using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPRequestBodySize(val int) attribute.KeyValue {
+ return HTTPRequestBodySizeKey.Int(val)
+}
+
+// HTTPRequestHeader returns an attribute KeyValue conforming to the
+// "http.request.header" semantic conventions. It represents the HTTP request
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPRequestHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.request.header."+key, val)
+}
+
+// HTTPRequestMethodOriginal returns an attribute KeyValue conforming to the
+// "http.request.method_original" semantic conventions. It represents the
+// original HTTP method sent by the client in the request line.
+func HTTPRequestMethodOriginal(val string) attribute.KeyValue {
+ return HTTPRequestMethodOriginalKey.String(val)
+}
+
+// HTTPRequestResendCount returns an attribute KeyValue conforming to the
+// "http.request.resend_count" semantic conventions. It represents the ordinal
+// number of request resending attempt (for any reason, including redirects).
+func HTTPRequestResendCount(val int) attribute.KeyValue {
+ return HTTPRequestResendCountKey.Int(val)
+}
+
+// HTTPRequestSize returns an attribute KeyValue conforming to the
+// "http.request.size" semantic conventions. It represents the total size of the
+// request in bytes. This should be the total number of bytes sent over the wire,
+// including the request line (HTTP/1.1), framing (HTTP/2 and HTTP/3), headers,
+// and request body if any.
+func HTTPRequestSize(val int) attribute.KeyValue {
+ return HTTPRequestSizeKey.Int(val)
+}
+
+// HTTPResponseBodySize returns an attribute KeyValue conforming to the
+// "http.response.body.size" semantic conventions. It represents the size of the
+// response payload body in bytes. This is the number of bytes transferred
+// excluding headers and is often, but not always, present as the
+// [Content-Length] header. For responses using transport encoding, this should be
+// the compressed size.
+//
+// [Content-Length]: https://www.rfc-editor.org/rfc/rfc9110.html#field.content-length
+func HTTPResponseBodySize(val int) attribute.KeyValue {
+ return HTTPResponseBodySizeKey.Int(val)
+}
+
+// HTTPResponseHeader returns an attribute KeyValue conforming to the
+// "http.response.header" semantic conventions. It represents the HTTP response
+// headers, `<key>` being the normalized HTTP Header name (lowercase), the value
+// being the header values.
+func HTTPResponseHeader(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("http.response.header."+key, val)
+}
+
+// HTTPResponseSize returns an attribute KeyValue conforming to the
+// "http.response.size" semantic conventions. It represents the total size of the
+// response in bytes. This should be the total number of bytes sent over the
+// wire, including the status line (HTTP/1.1), framing (HTTP/2 and HTTP/3),
+// headers, and response body and trailers if any.
+func HTTPResponseSize(val int) attribute.KeyValue {
+ return HTTPResponseSizeKey.Int(val)
+}
+
+// HTTPResponseStatusCode returns an attribute KeyValue conforming to the
+// "http.response.status_code" semantic conventions. It represents the
+// [HTTP response status code].
+//
+// [HTTP response status code]: https://tools.ietf.org/html/rfc7231#section-6
+func HTTPResponseStatusCode(val int) attribute.KeyValue {
+ return HTTPResponseStatusCodeKey.Int(val)
+}
+
+// HTTPRoute returns an attribute KeyValue conforming to the "http.route"
+// semantic conventions. It represents the matched route, that is, the path
+// template in the format used by the respective server framework.
+func HTTPRoute(val string) attribute.KeyValue {
+ return HTTPRouteKey.String(val)
+}
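+
+// Illustrative sketch, not part of the generated conventions: response-side
+// attributes an HTTP server instrumentation might record once a request has
+// been handled, built with the helpers above. The function name and the example
+// values are made up.
+func exampleServerResponseAttrs(status int, route string, bodyBytes int) []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HTTPResponseStatusCode(status),  // e.g. 200
+		HTTPRoute(route),                // low-cardinality path template, e.g. "/users/:userID?"
+		HTTPResponseBodySize(bodyBytes), // payload bytes, excluding headers
+	}
+}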
+
+// Enum values for http.connection.state
+var (
+ // active state.
+ // Stability: development
+ HTTPConnectionStateActive = HTTPConnectionStateKey.String("active")
+ // idle state.
+ // Stability: development
+ HTTPConnectionStateIdle = HTTPConnectionStateKey.String("idle")
+)
+
+// Enum values for http.request.method
+var (
+ // CONNECT method.
+ // Stability: stable
+ HTTPRequestMethodConnect = HTTPRequestMethodKey.String("CONNECT")
+ // DELETE method.
+ // Stability: stable
+ HTTPRequestMethodDelete = HTTPRequestMethodKey.String("DELETE")
+ // GET method.
+ // Stability: stable
+ HTTPRequestMethodGet = HTTPRequestMethodKey.String("GET")
+ // HEAD method.
+ // Stability: stable
+ HTTPRequestMethodHead = HTTPRequestMethodKey.String("HEAD")
+ // OPTIONS method.
+ // Stability: stable
+ HTTPRequestMethodOptions = HTTPRequestMethodKey.String("OPTIONS")
+ // PATCH method.
+ // Stability: stable
+ HTTPRequestMethodPatch = HTTPRequestMethodKey.String("PATCH")
+ // POST method.
+ // Stability: stable
+ HTTPRequestMethodPost = HTTPRequestMethodKey.String("POST")
+ // PUT method.
+ // Stability: stable
+ HTTPRequestMethodPut = HTTPRequestMethodKey.String("PUT")
+ // TRACE method.
+ // Stability: stable
+ HTTPRequestMethodTrace = HTTPRequestMethodKey.String("TRACE")
+ // Any HTTP method that the instrumentation has no prior knowledge of.
+ // Stability: stable
+ HTTPRequestMethodOther = HTTPRequestMethodKey.String("_OTHER")
+)
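+
+// Illustrative sketch, not part of the generated conventions: one possible way
+// to apply the `_OTHER` rule described for "http.request.method", using the enum
+// values above. The helper name is hypothetical; it assumes the default known
+// methods (RFC 9110 plus PATCH) and records the original value in
+// "http.request.method_original" when the method is not known.
+func normalizedRequestMethodAttrs(method string) []attribute.KeyValue {
+	known := map[string]attribute.KeyValue{
+		"CONNECT": HTTPRequestMethodConnect,
+		"DELETE":  HTTPRequestMethodDelete,
+		"GET":     HTTPRequestMethodGet,
+		"HEAD":    HTTPRequestMethodHead,
+		"OPTIONS": HTTPRequestMethodOptions,
+		"PATCH":   HTTPRequestMethodPatch,
+		"POST":    HTTPRequestMethodPost,
+		"PUT":     HTTPRequestMethodPut,
+		"TRACE":   HTTPRequestMethodTrace,
+	}
+	// Method names are case-sensitive: only an exact match counts as "known".
+	if kv, ok := known[method]; ok {
+		return []attribute.KeyValue{kv}
+	}
+	// Unknown methods are reported as `_OTHER`, keeping the original value.
+	return []attribute.KeyValue{
+		HTTPRequestMethodOther,
+		HTTPRequestMethodOriginal(method),
+	}
+}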
+
+// Namespace: hw
+const (
+ // HwBatteryCapacityKey is the attribute Key conforming to the
+ // "hw.battery.capacity" semantic conventions. It represents the design capacity
+	// in watt-hours or ampere-hours.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9.3Ah", "50Wh"
+ HwBatteryCapacityKey = attribute.Key("hw.battery.capacity")
+
+ // HwBatteryChemistryKey is the attribute Key conforming to the
+ // "hw.battery.chemistry" semantic conventions. It represents the battery
+ // [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Li-ion", "NiMH"
+ //
+ // [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+ HwBatteryChemistryKey = attribute.Key("hw.battery.chemistry")
+
+ // HwBatteryStateKey is the attribute Key conforming to the "hw.battery.state"
+ // semantic conventions. It represents the current state of the battery.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwBatteryStateKey = attribute.Key("hw.battery.state")
+
+ // HwBiosVersionKey is the attribute Key conforming to the "hw.bios_version"
+ // semantic conventions. It represents the BIOS version of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2.3"
+ HwBiosVersionKey = attribute.Key("hw.bios_version")
+
+ // HwDriverVersionKey is the attribute Key conforming to the "hw.driver_version"
+ // semantic conventions. It represents the driver version for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10.2.1-3"
+ HwDriverVersionKey = attribute.Key("hw.driver_version")
+
+ // HwEnclosureTypeKey is the attribute Key conforming to the "hw.enclosure.type"
+ // semantic conventions. It represents the type of the enclosure (useful for
+ // modular systems).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Computer", "Storage", "Switch"
+ HwEnclosureTypeKey = attribute.Key("hw.enclosure.type")
+
+ // HwFirmwareVersionKey is the attribute Key conforming to the
+ // "hw.firmware_version" semantic conventions. It represents the firmware
+ // version of the hardware component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0.1"
+ HwFirmwareVersionKey = attribute.Key("hw.firmware_version")
+
+ // HwGpuTaskKey is the attribute Key conforming to the "hw.gpu.task" semantic
+ // conventions. It represents the type of task the GPU is performing.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwGpuTaskKey = attribute.Key("hw.gpu.task")
+
+ // HwIDKey is the attribute Key conforming to the "hw.id" semantic conventions.
+ // It represents an identifier for the hardware component, unique within the
+ // monitored host.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "win32battery_battery_testsysa33_1"
+ HwIDKey = attribute.Key("hw.id")
+
+ // HwLimitTypeKey is the attribute Key conforming to the "hw.limit_type"
+ // semantic conventions. It represents the type of limit for hardware
+ // components.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwLimitTypeKey = attribute.Key("hw.limit_type")
+
+ // HwLogicalDiskRaidLevelKey is the attribute Key conforming to the
+ // "hw.logical_disk.raid_level" semantic conventions. It represents the RAID
+ // Level of the logical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "RAID0+1", "RAID5", "RAID10"
+ HwLogicalDiskRaidLevelKey = attribute.Key("hw.logical_disk.raid_level")
+
+ // HwLogicalDiskStateKey is the attribute Key conforming to the
+ // "hw.logical_disk.state" semantic conventions. It represents the state of the
+ // logical disk space usage.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwLogicalDiskStateKey = attribute.Key("hw.logical_disk.state")
+
+ // HwMemoryTypeKey is the attribute Key conforming to the "hw.memory.type"
+ // semantic conventions. It represents the type of the memory module.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "DDR4", "DDR5", "LPDDR5"
+ HwMemoryTypeKey = attribute.Key("hw.memory.type")
+
+ // HwModelKey is the attribute Key conforming to the "hw.model" semantic
+ // conventions. It represents the descriptive model name of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "PERC H740P", "Intel(R) Core(TM) i7-10700K", "Dell XPS 15 Battery"
+ HwModelKey = attribute.Key("hw.model")
+
+ // HwNameKey is the attribute Key conforming to the "hw.name" semantic
+ // conventions. It represents an easily-recognizable name for the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "eth0"
+ HwNameKey = attribute.Key("hw.name")
+
+ // HwNetworkLogicalAddressesKey is the attribute Key conforming to the
+ // "hw.network.logical_addresses" semantic conventions. It represents the
+ // logical addresses of the adapter (e.g. IP address, or WWPN).
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "172.16.8.21", "57.11.193.42"
+ HwNetworkLogicalAddressesKey = attribute.Key("hw.network.logical_addresses")
+
+ // HwNetworkPhysicalAddressKey is the attribute Key conforming to the
+ // "hw.network.physical_address" semantic conventions. It represents the
+ // physical address of the adapter (e.g. MAC address, or WWNN).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "00-90-F5-E9-7B-36"
+ HwNetworkPhysicalAddressKey = attribute.Key("hw.network.physical_address")
+
+ // HwParentKey is the attribute Key conforming to the "hw.parent" semantic
+ // conventions. It represents the unique identifier of the parent component
+ // (typically the `hw.id` attribute of the enclosure, or disk controller).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "dellStorage_perc_0"
+ HwParentKey = attribute.Key("hw.parent")
+
+ // HwPhysicalDiskSmartAttributeKey is the attribute Key conforming to the
+ // "hw.physical_disk.smart_attribute" semantic conventions. It represents the
+ // [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute
+ // of the physical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Spin Retry Count", "Seek Error Rate", "Raw Read Error Rate"
+ //
+ // [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T.
+ HwPhysicalDiskSmartAttributeKey = attribute.Key("hw.physical_disk.smart_attribute")
+
+ // HwPhysicalDiskStateKey is the attribute Key conforming to the
+ // "hw.physical_disk.state" semantic conventions. It represents the state of the
+ // physical disk endurance utilization.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwPhysicalDiskStateKey = attribute.Key("hw.physical_disk.state")
+
+ // HwPhysicalDiskTypeKey is the attribute Key conforming to the
+ // "hw.physical_disk.type" semantic conventions. It represents the type of the
+ // physical disk.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "HDD", "SSD", "10K"
+ HwPhysicalDiskTypeKey = attribute.Key("hw.physical_disk.type")
+
+ // HwSensorLocationKey is the attribute Key conforming to the
+ // "hw.sensor_location" semantic conventions. It represents the location of the
+ // sensor.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpu0", "ps1", "INLET", "CPU0_DIE", "AMBIENT", "MOTHERBOARD", "PS0
+ // V3_3", "MAIN_12V", "CPU_VCORE"
+ HwSensorLocationKey = attribute.Key("hw.sensor_location")
+
+ // HwSerialNumberKey is the attribute Key conforming to the "hw.serial_number"
+ // semantic conventions. It represents the serial number of the hardware
+ // component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CNFCP0123456789"
+ HwSerialNumberKey = attribute.Key("hw.serial_number")
+
+ // HwStateKey is the attribute Key conforming to the "hw.state" semantic
+ // conventions. It represents the current state of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwStateKey = attribute.Key("hw.state")
+
+ // HwTapeDriveOperationTypeKey is the attribute Key conforming to the
+ // "hw.tape_drive.operation_type" semantic conventions. It represents the type
+ // of tape drive operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ HwTapeDriveOperationTypeKey = attribute.Key("hw.tape_drive.operation_type")
+
+ // HwTypeKey is the attribute Key conforming to the "hw.type" semantic
+ // conventions. It represents the type of the component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: Describes the category of the hardware component for which `hw.state`
+ // is being reported. For example, `hw.type=temperature` along with
+ // `hw.state=degraded` would indicate that the temperature of the hardware
+ // component has been reported as `degraded`.
+ HwTypeKey = attribute.Key("hw.type")
+
+ // HwVendorKey is the attribute Key conforming to the "hw.vendor" semantic
+ // conventions. It represents the vendor name of the hardware component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Dell", "HP", "Intel", "AMD", "LSI", "Lenovo"
+ HwVendorKey = attribute.Key("hw.vendor")
+)
+
+// HwBatteryCapacity returns an attribute KeyValue conforming to the
+// "hw.battery.capacity" semantic conventions. It represents the design capacity
+// in watt-hours or ampere-hours.
+func HwBatteryCapacity(val string) attribute.KeyValue {
+ return HwBatteryCapacityKey.String(val)
+}
+
+// HwBatteryChemistry returns an attribute KeyValue conforming to the
+// "hw.battery.chemistry" semantic conventions. It represents the battery
+// [chemistry], e.g. Lithium-Ion, Nickel-Cadmium, etc.
+//
+// [chemistry]: https://schemas.dmtf.org/wbem/cim-html/2.31.0/CIM_Battery.html
+func HwBatteryChemistry(val string) attribute.KeyValue {
+ return HwBatteryChemistryKey.String(val)
+}
+
+// HwBiosVersion returns an attribute KeyValue conforming to the
+// "hw.bios_version" semantic conventions. It represents the BIOS version of the
+// hardware component.
+func HwBiosVersion(val string) attribute.KeyValue {
+ return HwBiosVersionKey.String(val)
+}
+
+// HwDriverVersion returns an attribute KeyValue conforming to the
+// "hw.driver_version" semantic conventions. It represents the driver version for
+// the hardware component.
+func HwDriverVersion(val string) attribute.KeyValue {
+ return HwDriverVersionKey.String(val)
+}
+
+// HwEnclosureType returns an attribute KeyValue conforming to the
+// "hw.enclosure.type" semantic conventions. It represents the type of the
+// enclosure (useful for modular systems).
+func HwEnclosureType(val string) attribute.KeyValue {
+ return HwEnclosureTypeKey.String(val)
+}
+
+// HwFirmwareVersion returns an attribute KeyValue conforming to the
+// "hw.firmware_version" semantic conventions. It represents the firmware version
+// of the hardware component.
+func HwFirmwareVersion(val string) attribute.KeyValue {
+ return HwFirmwareVersionKey.String(val)
+}
+
+// HwID returns an attribute KeyValue conforming to the "hw.id" semantic
+// conventions. It represents an identifier for the hardware component, unique
+// within the monitored host.
+func HwID(val string) attribute.KeyValue {
+ return HwIDKey.String(val)
+}
+
+// HwLogicalDiskRaidLevel returns an attribute KeyValue conforming to the
+// "hw.logical_disk.raid_level" semantic conventions. It represents the RAID
+// Level of the logical disk.
+func HwLogicalDiskRaidLevel(val string) attribute.KeyValue {
+ return HwLogicalDiskRaidLevelKey.String(val)
+}
+
+// HwMemoryType returns an attribute KeyValue conforming to the "hw.memory.type"
+// semantic conventions. It represents the type of the memory module.
+func HwMemoryType(val string) attribute.KeyValue {
+ return HwMemoryTypeKey.String(val)
+}
+
+// HwModel returns an attribute KeyValue conforming to the "hw.model" semantic
+// conventions. It represents the descriptive model name of the hardware
+// component.
+func HwModel(val string) attribute.KeyValue {
+ return HwModelKey.String(val)
+}
+
+// HwName returns an attribute KeyValue conforming to the "hw.name" semantic
+// conventions. It represents an easily-recognizable name for the hardware
+// component.
+func HwName(val string) attribute.KeyValue {
+ return HwNameKey.String(val)
+}
+
+// HwNetworkLogicalAddresses returns an attribute KeyValue conforming to the
+// "hw.network.logical_addresses" semantic conventions. It represents the logical
+// addresses of the adapter (e.g. IP address, or WWPN).
+func HwNetworkLogicalAddresses(val ...string) attribute.KeyValue {
+ return HwNetworkLogicalAddressesKey.StringSlice(val)
+}
+
+// HwNetworkPhysicalAddress returns an attribute KeyValue conforming to the
+// "hw.network.physical_address" semantic conventions. It represents the physical
+// address of the adapter (e.g. MAC address, or WWNN).
+func HwNetworkPhysicalAddress(val string) attribute.KeyValue {
+ return HwNetworkPhysicalAddressKey.String(val)
+}
+
+// HwParent returns an attribute KeyValue conforming to the "hw.parent" semantic
+// conventions. It represents the unique identifier of the parent component
+// (typically the `hw.id` attribute of the enclosure, or disk controller).
+func HwParent(val string) attribute.KeyValue {
+ return HwParentKey.String(val)
+}
+
+// HwPhysicalDiskSmartAttribute returns an attribute KeyValue conforming to the
+// "hw.physical_disk.smart_attribute" semantic conventions. It represents the
+// [S.M.A.R.T.] (Self-Monitoring, Analysis, and Reporting Technology) attribute
+// of the physical disk.
+//
+// [S.M.A.R.T.]: https://wikipedia.org/wiki/S.M.A.R.T.
+func HwPhysicalDiskSmartAttribute(val string) attribute.KeyValue {
+ return HwPhysicalDiskSmartAttributeKey.String(val)
+}
+
+// HwPhysicalDiskType returns an attribute KeyValue conforming to the
+// "hw.physical_disk.type" semantic conventions. It represents the type of the
+// physical disk.
+func HwPhysicalDiskType(val string) attribute.KeyValue {
+ return HwPhysicalDiskTypeKey.String(val)
+}
+
+// HwSensorLocation returns an attribute KeyValue conforming to the
+// "hw.sensor_location" semantic conventions. It represents the location of the
+// sensor.
+func HwSensorLocation(val string) attribute.KeyValue {
+ return HwSensorLocationKey.String(val)
+}
+
+// HwSerialNumber returns an attribute KeyValue conforming to the
+// "hw.serial_number" semantic conventions. It represents the serial number of
+// the hardware component.
+func HwSerialNumber(val string) attribute.KeyValue {
+ return HwSerialNumberKey.String(val)
+}
+
+// HwVendor returns an attribute KeyValue conforming to the "hw.vendor" semantic
+// conventions. It represents the vendor name of the hardware component.
+func HwVendor(val string) attribute.KeyValue {
+ return HwVendorKey.String(val)
+}
+
+// Enum values for hw.battery.state
+var (
+ // Charging
+ // Stability: development
+ HwBatteryStateCharging = HwBatteryStateKey.String("charging")
+ // Discharging
+ // Stability: development
+ HwBatteryStateDischarging = HwBatteryStateKey.String("discharging")
+)
+
+// Enum values for hw.gpu.task
+var (
+ // Decoder
+ // Stability: development
+ HwGpuTaskDecoder = HwGpuTaskKey.String("decoder")
+ // Encoder
+ // Stability: development
+ HwGpuTaskEncoder = HwGpuTaskKey.String("encoder")
+ // General
+ // Stability: development
+ HwGpuTaskGeneral = HwGpuTaskKey.String("general")
+)
+
+// Enum values for hw.limit_type
+var (
+ // Critical
+ // Stability: development
+ HwLimitTypeCritical = HwLimitTypeKey.String("critical")
+ // Degraded
+ // Stability: development
+ HwLimitTypeDegraded = HwLimitTypeKey.String("degraded")
+ // High Critical
+ // Stability: development
+ HwLimitTypeHighCritical = HwLimitTypeKey.String("high.critical")
+ // High Degraded
+ // Stability: development
+ HwLimitTypeHighDegraded = HwLimitTypeKey.String("high.degraded")
+ // Low Critical
+ // Stability: development
+ HwLimitTypeLowCritical = HwLimitTypeKey.String("low.critical")
+ // Low Degraded
+ // Stability: development
+ HwLimitTypeLowDegraded = HwLimitTypeKey.String("low.degraded")
+ // Maximum
+ // Stability: development
+ HwLimitTypeMax = HwLimitTypeKey.String("max")
+ // Throttled
+ // Stability: development
+ HwLimitTypeThrottled = HwLimitTypeKey.String("throttled")
+ // Turbo
+ // Stability: development
+ HwLimitTypeTurbo = HwLimitTypeKey.String("turbo")
+)
+
+// Enum values for hw.logical_disk.state
+var (
+ // Used
+ // Stability: development
+ HwLogicalDiskStateUsed = HwLogicalDiskStateKey.String("used")
+ // Free
+ // Stability: development
+ HwLogicalDiskStateFree = HwLogicalDiskStateKey.String("free")
+)
+
+// Enum values for hw.physical_disk.state
+var (
+ // Remaining
+ // Stability: development
+ HwPhysicalDiskStateRemaining = HwPhysicalDiskStateKey.String("remaining")
+)
+
+// Enum values for hw.state
+var (
+ // Degraded
+ // Stability: development
+ HwStateDegraded = HwStateKey.String("degraded")
+ // Failed
+ // Stability: development
+ HwStateFailed = HwStateKey.String("failed")
+ // Needs Cleaning
+ // Stability: development
+ HwStateNeedsCleaning = HwStateKey.String("needs_cleaning")
+ // OK
+ // Stability: development
+ HwStateOk = HwStateKey.String("ok")
+ // Predicted Failure
+ // Stability: development
+ HwStatePredictedFailure = HwStateKey.String("predicted_failure")
+)
+
+// Enum values for hw.tape_drive.operation_type
+var (
+ // Mount
+ // Stability: development
+ HwTapeDriveOperationTypeMount = HwTapeDriveOperationTypeKey.String("mount")
+ // Unmount
+ // Stability: development
+ HwTapeDriveOperationTypeUnmount = HwTapeDriveOperationTypeKey.String("unmount")
+ // Clean
+ // Stability: development
+ HwTapeDriveOperationTypeClean = HwTapeDriveOperationTypeKey.String("clean")
+)
+
+// Enum values for hw.type
+var (
+ // Battery
+ // Stability: development
+ HwTypeBattery = HwTypeKey.String("battery")
+ // CPU
+ // Stability: development
+ HwTypeCPU = HwTypeKey.String("cpu")
+ // Disk controller
+ // Stability: development
+ HwTypeDiskController = HwTypeKey.String("disk_controller")
+ // Enclosure
+ // Stability: development
+ HwTypeEnclosure = HwTypeKey.String("enclosure")
+ // Fan
+ // Stability: development
+ HwTypeFan = HwTypeKey.String("fan")
+ // GPU
+ // Stability: development
+ HwTypeGpu = HwTypeKey.String("gpu")
+ // Logical disk
+ // Stability: development
+ HwTypeLogicalDisk = HwTypeKey.String("logical_disk")
+ // Memory
+ // Stability: development
+ HwTypeMemory = HwTypeKey.String("memory")
+ // Network
+ // Stability: development
+ HwTypeNetwork = HwTypeKey.String("network")
+ // Physical disk
+ // Stability: development
+ HwTypePhysicalDisk = HwTypeKey.String("physical_disk")
+ // Power supply
+ // Stability: development
+ HwTypePowerSupply = HwTypeKey.String("power_supply")
+ // Tape drive
+ // Stability: development
+ HwTypeTapeDrive = HwTypeKey.String("tape_drive")
+ // Temperature
+ // Stability: development
+ HwTypeTemperature = HwTypeKey.String("temperature")
+ // Voltage
+ // Stability: development
+ HwTypeVoltage = HwTypeKey.String("voltage")
+)
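+
+// Illustrative sketch, not part of the generated conventions: the attribute set
+// one might attach to a hardware health metric, following the note on "hw.type"
+// above, where `hw.type` names the component category and `hw.state` carries its
+// reported status. The function name and all values are made up.
+func exampleDegradedTemperatureSensorAttrs() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		HwTypeTemperature,            // category of the component being reported
+		HwStateDegraded,              // its current state
+		HwID("cpu0_temp"),            // hypothetical component identifier
+		HwName("CPU0 temperature"),   // human-readable name
+		HwSensorLocation("CPU0_DIE"), // where the sensor sits
+	}
+}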
+
+// Namespace: ios
+const (
+ // IOSAppStateKey is the attribute Key conforming to the "ios.app.state"
+	// semantic conventions. It represents the state of the application.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The iOS lifecycle states are defined in the
+	// [UIApplicationDelegate documentation], from which the `OS terminology`
+ // column values are derived.
+ //
+ // [UIApplicationDelegate documentation]: https://developer.apple.com/documentation/uikit/uiapplicationdelegate
+ IOSAppStateKey = attribute.Key("ios.app.state")
+)
+
+// Enum values for ios.app.state
+var (
+ // The app has become `active`. Associated with UIKit notification
+ // `applicationDidBecomeActive`.
+ //
+ // Stability: development
+ IOSAppStateActive = IOSAppStateKey.String("active")
+ // The app is now `inactive`. Associated with UIKit notification
+ // `applicationWillResignActive`.
+ //
+ // Stability: development
+ IOSAppStateInactive = IOSAppStateKey.String("inactive")
+ // The app is now in the background. This value is associated with UIKit
+ // notification `applicationDidEnterBackground`.
+ //
+ // Stability: development
+ IOSAppStateBackground = IOSAppStateKey.String("background")
+ // The app is now in the foreground. This value is associated with UIKit
+ // notification `applicationWillEnterForeground`.
+ //
+ // Stability: development
+ IOSAppStateForeground = IOSAppStateKey.String("foreground")
+ // The app is about to terminate. Associated with UIKit notification
+ // `applicationWillTerminate`.
+ //
+ // Stability: development
+ IOSAppStateTerminate = IOSAppStateKey.String("terminate")
+)
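+
+// Illustrative sketch, not part of the generated conventions: a hypothetical
+// mapping from UIKit lifecycle notification names to the "ios.app.state" enum
+// values above. The second return value reports whether the notification was
+// recognized.
+func iosAppStateFromNotification(name string) (attribute.KeyValue, bool) {
+	switch name {
+	case "applicationDidBecomeActive":
+		return IOSAppStateActive, true
+	case "applicationWillResignActive":
+		return IOSAppStateInactive, true
+	case "applicationDidEnterBackground":
+		return IOSAppStateBackground, true
+	case "applicationWillEnterForeground":
+		return IOSAppStateForeground, true
+	case "applicationWillTerminate":
+		return IOSAppStateTerminate, true
+	}
+	return attribute.KeyValue{}, false
+}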
+
+// Namespace: k8s
+const (
+ // K8SClusterNameKey is the attribute Key conforming to the "k8s.cluster.name"
+ // semantic conventions. It represents the name of the cluster.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-cluster"
+ K8SClusterNameKey = attribute.Key("k8s.cluster.name")
+
+ // K8SClusterUIDKey is the attribute Key conforming to the "k8s.cluster.uid"
+ // semantic conventions. It represents a pseudo-ID for the cluster, set to the
+ // UID of the `kube-system` namespace.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "218fc5a9-a5f1-4b54-aa05-46717d0ab26d"
+ // Note: K8s doesn't have support for obtaining a cluster ID. If this is ever
+ // added, we will recommend collecting the `k8s.cluster.uid` through the
+ // official APIs. In the meantime, we are able to use the `uid` of the
+ // `kube-system` namespace as a proxy for cluster ID. Read on for the
+ // rationale.
+ //
+ // Every object created in a K8s cluster is assigned a distinct UID. The
+ // `kube-system` namespace is used by Kubernetes itself and will exist
+ // for the lifetime of the cluster. Using the `uid` of the `kube-system`
+ // namespace is a reasonable proxy for the K8s ClusterID as it will only
+ // change if the cluster is rebuilt. Furthermore, Kubernetes UIDs are
+ // UUIDs as standardized by
+ // [ISO/IEC 9834-8 and ITU-T X.667].
+ // Which states:
+ //
+ // > If generated according to one of the mechanisms defined in Rec.
+ // > ITU-T X.667 | ISO/IEC 9834-8, a UUID is either guaranteed to be
+ // > different from all other UUIDs generated before 3603 A.D., or is
+ // > extremely likely to be different (depending on the mechanism chosen).
+ //
+ // Therefore, UIDs between clusters should be extremely unlikely to
+ // conflict.
+ //
+ // [ISO/IEC 9834-8 and ITU-T X.667]: https://www.itu.int/ITU-T/studygroups/com17/oid.html
+ K8SClusterUIDKey = attribute.Key("k8s.cluster.uid")
+
+ // K8SContainerNameKey is the attribute Key conforming to the
+ // "k8s.container.name" semantic conventions. It represents the name of the
+	// Container from the Pod specification; it must be unique within a Pod. The
+	// container runtime usually uses a different, globally unique name
+	// (`container.name`).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "redis"
+ K8SContainerNameKey = attribute.Key("k8s.container.name")
+
+ // K8SContainerRestartCountKey is the attribute Key conforming to the
+ // "k8s.container.restart_count" semantic conventions. It represents the number
+ // of times the container was restarted. This attribute can be used to identify
+ // a particular container (running or stopped) within a container spec.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ K8SContainerRestartCountKey = attribute.Key("k8s.container.restart_count")
+
+ // K8SContainerStatusLastTerminatedReasonKey is the attribute Key conforming to
+ // the "k8s.container.status.last_terminated_reason" semantic conventions. It
+ // represents the last terminated reason of the Container.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Evicted", "Error"
+ K8SContainerStatusLastTerminatedReasonKey = attribute.Key("k8s.container.status.last_terminated_reason")
+
+ // K8SContainerStatusReasonKey is the attribute Key conforming to the
+ // "k8s.container.status.reason" semantic conventions. It represents the reason
+ // for the container state. Corresponds to the `reason` field of the:
+ // [K8s ContainerStateWaiting] or [K8s ContainerStateTerminated].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ContainerCreating", "CrashLoopBackOff",
+ // "CreateContainerConfigError", "ErrImagePull", "ImagePullBackOff",
+ // "OOMKilled", "Completed", "Error", "ContainerCannotRun"
+ //
+ // [K8s ContainerStateWaiting]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstatewaiting-v1-core
+ // [K8s ContainerStateTerminated]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstateterminated-v1-core
+ K8SContainerStatusReasonKey = attribute.Key("k8s.container.status.reason")
+
+ // K8SContainerStatusStateKey is the attribute Key conforming to the
+ // "k8s.container.status.state" semantic conventions. It represents the state of
+ // the container. [K8s ContainerState].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "terminated", "running", "waiting"
+ //
+ // [K8s ContainerState]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#containerstate-v1-core
+ K8SContainerStatusStateKey = attribute.Key("k8s.container.status.state")
+
+ // K8SCronJobNameKey is the attribute Key conforming to the "k8s.cronjob.name"
+ // semantic conventions. It represents the name of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SCronJobNameKey = attribute.Key("k8s.cronjob.name")
+
+ // K8SCronJobUIDKey is the attribute Key conforming to the "k8s.cronjob.uid"
+ // semantic conventions. It represents the UID of the CronJob.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid")
+
+ // K8SDaemonSetNameKey is the attribute Key conforming to the
+ // "k8s.daemonset.name" semantic conventions. It represents the name of the
+ // DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name")
+
+ // K8SDaemonSetUIDKey is the attribute Key conforming to the "k8s.daemonset.uid"
+ // semantic conventions. It represents the UID of the DaemonSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid")
+
+ // K8SDeploymentNameKey is the attribute Key conforming to the
+ // "k8s.deployment.name" semantic conventions. It represents the name of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SDeploymentNameKey = attribute.Key("k8s.deployment.name")
+
+ // K8SDeploymentUIDKey is the attribute Key conforming to the
+ // "k8s.deployment.uid" semantic conventions. It represents the UID of the
+ // Deployment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid")
+
+ // K8SHPAMetricTypeKey is the attribute Key conforming to the
+ // "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+ // source for the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Resource", "ContainerResource"
+ // Note: This attribute reflects the `type` field of spec.metrics[] in the HPA.
+ K8SHPAMetricTypeKey = attribute.Key("k8s.hpa.metric.type")
+
+ // K8SHPANameKey is the attribute Key conforming to the "k8s.hpa.name" semantic
+ // conventions. It represents the name of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SHPANameKey = attribute.Key("k8s.hpa.name")
+
+ // K8SHPAScaletargetrefAPIVersionKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+ // API version of the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "apps/v1", "autoscaling/v2"
+ // Note: This maps to the `apiVersion` field in the `scaleTargetRef` of the HPA
+ // spec.
+ K8SHPAScaletargetrefAPIVersionKey = attribute.Key("k8s.hpa.scaletargetref.api_version")
+
+ // K8SHPAScaletargetrefKindKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+ // the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Deployment", "StatefulSet"
+ // Note: This maps to the `kind` field in the `scaleTargetRef` of the HPA spec.
+ K8SHPAScaletargetrefKindKey = attribute.Key("k8s.hpa.scaletargetref.kind")
+
+ // K8SHPAScaletargetrefNameKey is the attribute Key conforming to the
+ // "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+ // the target resource to scale for the HorizontalPodAutoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-deployment", "my-statefulset"
+ // Note: This maps to the `name` field in the `scaleTargetRef` of the HPA spec.
+ K8SHPAScaletargetrefNameKey = attribute.Key("k8s.hpa.scaletargetref.name")
+
+ // K8SHPAUIDKey is the attribute Key conforming to the "k8s.hpa.uid" semantic
+ // conventions. It represents the UID of the horizontal pod autoscaler.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SHPAUIDKey = attribute.Key("k8s.hpa.uid")
+
+ // K8SHugepageSizeKey is the attribute Key conforming to the "k8s.hugepage.size"
+ // semantic conventions. It represents the size (identifier) of the K8s huge
+ // page.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2Mi"
+ K8SHugepageSizeKey = attribute.Key("k8s.hugepage.size")
+
+ // K8SJobNameKey is the attribute Key conforming to the "k8s.job.name" semantic
+ // conventions. It represents the name of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SJobNameKey = attribute.Key("k8s.job.name")
+
+ // K8SJobUIDKey is the attribute Key conforming to the "k8s.job.uid" semantic
+ // conventions. It represents the UID of the Job.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SJobUIDKey = attribute.Key("k8s.job.uid")
+
+ // K8SNamespaceNameKey is the attribute Key conforming to the
+ // "k8s.namespace.name" semantic conventions. It represents the name of the
+ // namespace that the pod is running in.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "default"
+ K8SNamespaceNameKey = attribute.Key("k8s.namespace.name")
+
+ // K8SNamespacePhaseKey is the attribute Key conforming to the
+ // "k8s.namespace.phase" semantic conventions. It represents the phase of the
+ // K8s namespace.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "active", "terminating"
+ // Note: This attribute aligns with the `phase` field of the
+ // [K8s NamespaceStatus]
+ //
+ // [K8s NamespaceStatus]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#namespacestatus-v1-core
+ K8SNamespacePhaseKey = attribute.Key("k8s.namespace.phase")
+
+ // K8SNodeConditionStatusKey is the attribute Key conforming to the
+ // "k8s.node.condition.status" semantic conventions. It represents the status of
+ // the condition, one of True, False, Unknown.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "true", "false", "unknown"
+ // Note: This attribute aligns with the `status` field of the
+ // [NodeCondition]
+ //
+ // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core
+ K8SNodeConditionStatusKey = attribute.Key("k8s.node.condition.status")
+
+ // K8SNodeConditionTypeKey is the attribute Key conforming to the
+ // "k8s.node.condition.type" semantic conventions. It represents the condition
+ // type of a K8s Node.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Ready", "DiskPressure"
+ // Note: K8s Node conditions as described
+ // by [K8s documentation].
+ //
+ // This attribute aligns with the `type` field of the
+ // [NodeCondition]
+ //
+ // The set of possible values is not limited to those listed here. Managed
+	// Kubernetes environments or custom controllers MAY introduce additional node
+	// condition types.
+ // When this occurs, the exact value as reported by the Kubernetes API SHOULD be
+ // used.
+ //
+ // [K8s documentation]: https://v1-32.docs.kubernetes.io/docs/reference/node/node-status/#condition
+ // [NodeCondition]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#nodecondition-v1-core
+ K8SNodeConditionTypeKey = attribute.Key("k8s.node.condition.type")
+
+ // K8SNodeNameKey is the attribute Key conforming to the "k8s.node.name"
+ // semantic conventions. It represents the name of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "node-1"
+ K8SNodeNameKey = attribute.Key("k8s.node.name")
+
+ // K8SNodeUIDKey is the attribute Key conforming to the "k8s.node.uid" semantic
+ // conventions. It represents the UID of the Node.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2"
+ K8SNodeUIDKey = attribute.Key("k8s.node.uid")
+
+ // K8SPodNameKey is the attribute Key conforming to the "k8s.pod.name" semantic
+ // conventions. It represents the name of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry-pod-autoconf"
+ K8SPodNameKey = attribute.Key("k8s.pod.name")
+
+ // K8SPodUIDKey is the attribute Key conforming to the "k8s.pod.uid" semantic
+ // conventions. It represents the UID of the Pod.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SPodUIDKey = attribute.Key("k8s.pod.uid")
+
+ // K8SReplicaSetNameKey is the attribute Key conforming to the
+ // "k8s.replicaset.name" semantic conventions. It represents the name of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name")
+
+ // K8SReplicaSetUIDKey is the attribute Key conforming to the
+ // "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+ // ReplicaSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid")
+
+ // K8SReplicationControllerNameKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.name" semantic conventions. It represents the name
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SReplicationControllerNameKey = attribute.Key("k8s.replicationcontroller.name")
+
+ // K8SReplicationControllerUIDKey is the attribute Key conforming to the
+ // "k8s.replicationcontroller.uid" semantic conventions. It represents the UID
+ // of the replication controller.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SReplicationControllerUIDKey = attribute.Key("k8s.replicationcontroller.uid")
+
+ // K8SResourceQuotaNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.name" semantic conventions. It represents the name of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SResourceQuotaNameKey = attribute.Key("k8s.resourcequota.name")
+
+ // K8SResourceQuotaResourceNameKey is the attribute Key conforming to the
+ // "k8s.resourcequota.resource_name" semantic conventions. It represents the
+ // name of the K8s resource a resource quota defines.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "count/replicationcontrollers"
+ // Note: The value for this attribute can be either the full
+	// `count/<resource>[.<group>]` string (e.g., count/deployments.apps,
+ // count/pods), or, for certain core Kubernetes resources, just the resource
+ // name (e.g., pods, services, configmaps). Both forms are supported by
+ // Kubernetes for object count quotas. See
+ // [Kubernetes Resource Quotas documentation] for more details.
+ //
+ // [Kubernetes Resource Quotas documentation]: https://kubernetes.io/docs/concepts/policy/resource-quotas/#object-count-quota
+ K8SResourceQuotaResourceNameKey = attribute.Key("k8s.resourcequota.resource_name")
+
+ // K8SResourceQuotaUIDKey is the attribute Key conforming to the
+ // "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+ // resource quota.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SResourceQuotaUIDKey = attribute.Key("k8s.resourcequota.uid")
+
+ // K8SStatefulSetNameKey is the attribute Key conforming to the
+ // "k8s.statefulset.name" semantic conventions. It represents the name of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "opentelemetry"
+ K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name")
+
+ // K8SStatefulSetUIDKey is the attribute Key conforming to the
+ // "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+ // StatefulSet.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"
+ K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid")
+
+ // K8SStorageclassNameKey is the attribute Key conforming to the
+ // "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+ // [StorageClass] object.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "gold.storageclass.storage.k8s.io"
+ //
+ // [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+ K8SStorageclassNameKey = attribute.Key("k8s.storageclass.name")
+
+ // K8SVolumeNameKey is the attribute Key conforming to the "k8s.volume.name"
+ // semantic conventions. It represents the name of the K8s volume.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "volume0"
+ K8SVolumeNameKey = attribute.Key("k8s.volume.name")
+
+ // K8SVolumeTypeKey is the attribute Key conforming to the "k8s.volume.type"
+ // semantic conventions. It represents the type of the K8s volume.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "emptyDir", "persistentVolumeClaim"
+ K8SVolumeTypeKey = attribute.Key("k8s.volume.type")
+)
+
+// K8SClusterName returns an attribute KeyValue conforming to the
+// "k8s.cluster.name" semantic conventions. It represents the name of the
+// cluster.
+func K8SClusterName(val string) attribute.KeyValue {
+ return K8SClusterNameKey.String(val)
+}
+
+// K8SClusterUID returns an attribute KeyValue conforming to the
+// "k8s.cluster.uid" semantic conventions. It represents a pseudo-ID for the
+// cluster, set to the UID of the `kube-system` namespace.
+func K8SClusterUID(val string) attribute.KeyValue {
+ return K8SClusterUIDKey.String(val)
+}
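+
+// Illustrative sketch, not part of the generated conventions: obtaining the
+// `kube-system` namespace UID as the cluster pseudo-ID, as described in the note
+// on "k8s.cluster.uid" above. It is kept as a commented example because it would
+// need imports this generated file does not have ("context",
+// "k8s.io/client-go/kubernetes" and metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"):
+//
+//	func clusterUIDAttr(ctx context.Context, cs kubernetes.Interface) (attribute.KeyValue, error) {
+//		ns, err := cs.CoreV1().Namespaces().Get(ctx, "kube-system", metav1.GetOptions{})
+//		if err != nil {
+//			return attribute.KeyValue{}, err
+//		}
+//		// The kube-system namespace lives for the lifetime of the cluster, so its
+//		// UID is a stable proxy for a cluster ID.
+//		return K8SClusterUID(string(ns.UID)), nil
+//	}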
+
+// K8SContainerName returns an attribute KeyValue conforming to the
+// "k8s.container.name" semantic conventions. It represents the name of the
+// Container from the Pod specification; it must be unique within a Pod. The
+// container runtime usually uses a different, globally unique name
+// (`container.name`).
+func K8SContainerName(val string) attribute.KeyValue {
+ return K8SContainerNameKey.String(val)
+}
+
+// K8SContainerRestartCount returns an attribute KeyValue conforming to the
+// "k8s.container.restart_count" semantic conventions. It represents the number
+// of times the container was restarted. This attribute can be used to identify a
+// particular container (running or stopped) within a container spec.
+func K8SContainerRestartCount(val int) attribute.KeyValue {
+ return K8SContainerRestartCountKey.Int(val)
+}
+
+// K8SContainerStatusLastTerminatedReason returns an attribute KeyValue
+// conforming to the "k8s.container.status.last_terminated_reason" semantic
+// conventions. It represents the last terminated reason of the Container.
+func K8SContainerStatusLastTerminatedReason(val string) attribute.KeyValue {
+ return K8SContainerStatusLastTerminatedReasonKey.String(val)
+}
+
+// K8SCronJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.cronjob.annotation" semantic conventions. It represents the cronjob
+// annotation placed on the CronJob, the `<key>` being the annotation name, the
+// value being the annotation value.
+func K8SCronJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.annotation."+key, val)
+}
+
+// K8SCronJobLabel returns an attribute KeyValue conforming to the
+// "k8s.cronjob.label" semantic conventions. It represents the label placed on
+// the CronJob, the `<key>` being the label name, the value being the label
+// value.
+func K8SCronJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.cronjob.label."+key, val)
+}
+
+// K8SCronJobName returns an attribute KeyValue conforming to the
+// "k8s.cronjob.name" semantic conventions. It represents the name of the
+// CronJob.
+func K8SCronJobName(val string) attribute.KeyValue {
+ return K8SCronJobNameKey.String(val)
+}
+
+// K8SCronJobUID returns an attribute KeyValue conforming to the
+// "k8s.cronjob.uid" semantic conventions. It represents the UID of the CronJob.
+func K8SCronJobUID(val string) attribute.KeyValue {
+ return K8SCronJobUIDKey.String(val)
+}
+
+// K8SDaemonSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.daemonset.annotation" semantic conventions. It represents the annotation
+// placed on the DaemonSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDaemonSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.annotation."+key, val)
+}
+
+// K8SDaemonSetLabel returns an attribute KeyValue conforming to the
+// "k8s.daemonset.label" semantic conventions. It represents the label placed on
+// the DaemonSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDaemonSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.daemonset.label."+key, val)
+}
+
+// K8SDaemonSetName returns an attribute KeyValue conforming to the
+// "k8s.daemonset.name" semantic conventions. It represents the name of the
+// DaemonSet.
+func K8SDaemonSetName(val string) attribute.KeyValue {
+ return K8SDaemonSetNameKey.String(val)
+}
+
+// K8SDaemonSetUID returns an attribute KeyValue conforming to the
+// "k8s.daemonset.uid" semantic conventions. It represents the UID of the
+// DaemonSet.
+func K8SDaemonSetUID(val string) attribute.KeyValue {
+ return K8SDaemonSetUIDKey.String(val)
+}
+
+// K8SDeploymentAnnotation returns an attribute KeyValue conforming to the
+// "k8s.deployment.annotation" semantic conventions. It represents the annotation
+// placed on the Deployment, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SDeploymentAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.annotation."+key, val)
+}
+
+// K8SDeploymentLabel returns an attribute KeyValue conforming to the
+// "k8s.deployment.label" semantic conventions. It represents the label placed on
+// the Deployment, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SDeploymentLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.deployment.label."+key, val)
+}
+
+// K8SDeploymentName returns an attribute KeyValue conforming to the
+// "k8s.deployment.name" semantic conventions. It represents the name of the
+// Deployment.
+func K8SDeploymentName(val string) attribute.KeyValue {
+ return K8SDeploymentNameKey.String(val)
+}
+
+// K8SDeploymentUID returns an attribute KeyValue conforming to the
+// "k8s.deployment.uid" semantic conventions. It represents the UID of the
+// Deployment.
+func K8SDeploymentUID(val string) attribute.KeyValue {
+ return K8SDeploymentUIDKey.String(val)
+}
+
+// K8SHPAMetricType returns an attribute KeyValue conforming to the
+// "k8s.hpa.metric.type" semantic conventions. It represents the type of metric
+// source for the horizontal pod autoscaler.
+func K8SHPAMetricType(val string) attribute.KeyValue {
+ return K8SHPAMetricTypeKey.String(val)
+}
+
+// K8SHPAName returns an attribute KeyValue conforming to the "k8s.hpa.name"
+// semantic conventions. It represents the name of the horizontal pod autoscaler.
+func K8SHPAName(val string) attribute.KeyValue {
+ return K8SHPANameKey.String(val)
+}
+
+// K8SHPAScaletargetrefAPIVersion returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.api_version" semantic conventions. It represents the
+// API version of the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefAPIVersion(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefAPIVersionKey.String(val)
+}
+
+// K8SHPAScaletargetrefKind returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.kind" semantic conventions. It represents the kind of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefKind(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefKindKey.String(val)
+}
+
+// K8SHPAScaletargetrefName returns an attribute KeyValue conforming to the
+// "k8s.hpa.scaletargetref.name" semantic conventions. It represents the name of
+// the target resource to scale for the HorizontalPodAutoscaler.
+func K8SHPAScaletargetrefName(val string) attribute.KeyValue {
+ return K8SHPAScaletargetrefNameKey.String(val)
+}
+
+// K8SHPAUID returns an attribute KeyValue conforming to the "k8s.hpa.uid"
+// semantic conventions. It represents the UID of the horizontal pod autoscaler.
+func K8SHPAUID(val string) attribute.KeyValue {
+ return K8SHPAUIDKey.String(val)
+}
+
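+// As a sketch (names and values are hypothetical), the HPA helpers above can be
+// combined to describe an autoscaler and its scale target:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SHPAName("netobserv-plugin"),
+//		K8SHPAScaletargetrefKind("Deployment"),
+//		K8SHPAScaletargetrefName("netobserv-plugin"),
+//		K8SHPAScaletargetrefAPIVersion("apps/v1"),
+//	}
+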
+// K8SHugepageSize returns an attribute KeyValue conforming to the
+// "k8s.hugepage.size" semantic conventions. It represents the size (identifier)
+// of the K8s huge page.
+func K8SHugepageSize(val string) attribute.KeyValue {
+ return K8SHugepageSizeKey.String(val)
+}
+
+// K8SJobAnnotation returns an attribute KeyValue conforming to the
+// "k8s.job.annotation" semantic conventions. It represents the annotation placed
+// on the Job, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SJobAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.annotation."+key, val)
+}
+
+// K8SJobLabel returns an attribute KeyValue conforming to the "k8s.job.label"
+// semantic conventions. It represents the label placed on the Job, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SJobLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.job.label."+key, val)
+}
+
+// K8SJobName returns an attribute KeyValue conforming to the "k8s.job.name"
+// semantic conventions. It represents the name of the Job.
+func K8SJobName(val string) attribute.KeyValue {
+ return K8SJobNameKey.String(val)
+}
+
+// K8SJobUID returns an attribute KeyValue conforming to the "k8s.job.uid"
+// semantic conventions. It represents the UID of the Job.
+func K8SJobUID(val string) attribute.KeyValue {
+ return K8SJobUIDKey.String(val)
+}
+
+// K8SNamespaceAnnotation returns an attribute KeyValue conforming to the
+// "k8s.namespace.annotation" semantic conventions. It represents the annotation
+// placed on the Namespace, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SNamespaceAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.annotation."+key, val)
+}
+
+// K8SNamespaceLabel returns an attribute KeyValue conforming to the
+// "k8s.namespace.label" semantic conventions. It represents the label placed on
+// the Namespace, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SNamespaceLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.namespace.label."+key, val)
+}
+
+// K8SNamespaceName returns an attribute KeyValue conforming to the
+// "k8s.namespace.name" semantic conventions. It represents the name of the
+// namespace that the pod is running in.
+func K8SNamespaceName(val string) attribute.KeyValue {
+ return K8SNamespaceNameKey.String(val)
+}
+
+// K8SNodeAnnotation returns an attribute KeyValue conforming to the
+// "k8s.node.annotation" semantic conventions. It represents the annotation
+// placed on the Node, the `<key>` being the annotation name, the value being the
+// annotation value, even if the value is empty.
+func K8SNodeAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.annotation."+key, val)
+}
+
+// K8SNodeLabel returns an attribute KeyValue conforming to the "k8s.node.label"
+// semantic conventions. It represents the label placed on the Node, the `<key>`
+// being the label name, the value being the label value, even if the value is
+// empty.
+func K8SNodeLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.node.label."+key, val)
+}
+
+// K8SNodeName returns an attribute KeyValue conforming to the "k8s.node.name"
+// semantic conventions. It represents the name of the Node.
+func K8SNodeName(val string) attribute.KeyValue {
+ return K8SNodeNameKey.String(val)
+}
+
+// K8SNodeUID returns an attribute KeyValue conforming to the "k8s.node.uid"
+// semantic conventions. It represents the UID of the Node.
+func K8SNodeUID(val string) attribute.KeyValue {
+ return K8SNodeUIDKey.String(val)
+}
+
+// K8SPodAnnotation returns an attribute KeyValue conforming to the
+// "k8s.pod.annotation" semantic conventions. It represents the annotation placed
+// on the Pod, the `<key>` being the annotation name, the value being the
+// annotation value.
+func K8SPodAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.annotation."+key, val)
+}
+
+// K8SPodLabel returns an attribute KeyValue conforming to the "k8s.pod.label"
+// semantic conventions. It represents the label placed on the Pod, the `<key>`
+// being the label name, the value being the label value.
+func K8SPodLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.pod.label."+key, val)
+}
+
+// K8SPodName returns an attribute KeyValue conforming to the "k8s.pod.name"
+// semantic conventions. It represents the name of the Pod.
+func K8SPodName(val string) attribute.KeyValue {
+ return K8SPodNameKey.String(val)
+}
+
+// K8SPodUID returns an attribute KeyValue conforming to the "k8s.pod.uid"
+// semantic conventions. It represents the UID of the Pod.
+func K8SPodUID(val string) attribute.KeyValue {
+ return K8SPodUIDKey.String(val)
+}
+
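+// A minimal sketch (hypothetical values) combining the pod-level identity
+// attributes defined above, as they might be attached to telemetry about a pod:
+//
+//	attrs := []attribute.KeyValue{
+//		K8SNamespaceName("netobserv"),
+//		K8SPodName("flowlogs-pipeline-abc12"),
+//		K8SPodUID("275ecb36-5aa8-4c2a-9c47-d8bb681b9aff"),
+//		K8SNodeName("worker-0"),
+//	}
+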
+// K8SReplicaSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.replicaset.annotation" semantic conventions. It represents the annotation
+// placed on the ReplicaSet, the `<key>` being the annotation name, the value
+// being the annotation value, even if the value is empty.
+func K8SReplicaSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.annotation."+key, val)
+}
+
+// K8SReplicaSetLabel returns an attribute KeyValue conforming to the
+// "k8s.replicaset.label" semantic conventions. It represents the label placed on
+// the ReplicaSet, the `<key>` being the label name, the value being the label
+// value, even if the value is empty.
+func K8SReplicaSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.replicaset.label."+key, val)
+}
+
+// K8SReplicaSetName returns an attribute KeyValue conforming to the
+// "k8s.replicaset.name" semantic conventions. It represents the name of the
+// ReplicaSet.
+func K8SReplicaSetName(val string) attribute.KeyValue {
+ return K8SReplicaSetNameKey.String(val)
+}
+
+// K8SReplicaSetUID returns an attribute KeyValue conforming to the
+// "k8s.replicaset.uid" semantic conventions. It represents the UID of the
+// ReplicaSet.
+func K8SReplicaSetUID(val string) attribute.KeyValue {
+ return K8SReplicaSetUIDKey.String(val)
+}
+
+// K8SReplicationControllerName returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.name" semantic conventions. It represents the name
+// of the replication controller.
+func K8SReplicationControllerName(val string) attribute.KeyValue {
+ return K8SReplicationControllerNameKey.String(val)
+}
+
+// K8SReplicationControllerUID returns an attribute KeyValue conforming to the
+// "k8s.replicationcontroller.uid" semantic conventions. It represents the UID of
+// the replication controller.
+func K8SReplicationControllerUID(val string) attribute.KeyValue {
+ return K8SReplicationControllerUIDKey.String(val)
+}
+
+// K8SResourceQuotaName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.name" semantic conventions. It represents the name of the
+// resource quota.
+func K8SResourceQuotaName(val string) attribute.KeyValue {
+ return K8SResourceQuotaNameKey.String(val)
+}
+
+// K8SResourceQuotaResourceName returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.resource_name" semantic conventions. It represents the name
+// of the K8s resource a resource quota defines.
+func K8SResourceQuotaResourceName(val string) attribute.KeyValue {
+ return K8SResourceQuotaResourceNameKey.String(val)
+}
+
+// K8SResourceQuotaUID returns an attribute KeyValue conforming to the
+// "k8s.resourcequota.uid" semantic conventions. It represents the UID of the
+// resource quota.
+func K8SResourceQuotaUID(val string) attribute.KeyValue {
+ return K8SResourceQuotaUIDKey.String(val)
+}
+
+// K8SStatefulSetAnnotation returns an attribute KeyValue conforming to the
+// "k8s.statefulset.annotation" semantic conventions. It represents the
+// annotation placed on the StatefulSet, the `<key>` being the annotation name,
+// the value being the annotation value, even if the value is empty.
+func K8SStatefulSetAnnotation(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.annotation."+key, val)
+}
+
+// K8SStatefulSetLabel returns an attribute KeyValue conforming to the
+// "k8s.statefulset.label" semantic conventions. It represents the label placed
+// on the StatefulSet, the `<key>` being the label name, the value being the
+// label value, even if the value is empty.
+func K8SStatefulSetLabel(key string, val string) attribute.KeyValue {
+ return attribute.String("k8s.statefulset.label."+key, val)
+}
+
+// K8SStatefulSetName returns an attribute KeyValue conforming to the
+// "k8s.statefulset.name" semantic conventions. It represents the name of the
+// StatefulSet.
+func K8SStatefulSetName(val string) attribute.KeyValue {
+ return K8SStatefulSetNameKey.String(val)
+}
+
+// K8SStatefulSetUID returns an attribute KeyValue conforming to the
+// "k8s.statefulset.uid" semantic conventions. It represents the UID of the
+// StatefulSet.
+func K8SStatefulSetUID(val string) attribute.KeyValue {
+ return K8SStatefulSetUIDKey.String(val)
+}
+
+// K8SStorageclassName returns an attribute KeyValue conforming to the
+// "k8s.storageclass.name" semantic conventions. It represents the name of K8s
+// [StorageClass] object.
+//
+// [StorageClass]: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.30/#storageclass-v1-storage-k8s-io
+func K8SStorageclassName(val string) attribute.KeyValue {
+ return K8SStorageclassNameKey.String(val)
+}
+
+// K8SVolumeName returns an attribute KeyValue conforming to the
+// "k8s.volume.name" semantic conventions. It represents the name of the K8s
+// volume.
+func K8SVolumeName(val string) attribute.KeyValue {
+ return K8SVolumeNameKey.String(val)
+}
+
+// Enum values for k8s.container.status.reason
+var (
+ // The container is being created.
+ // Stability: development
+ K8SContainerStatusReasonContainerCreating = K8SContainerStatusReasonKey.String("ContainerCreating")
+ // The container is in a crash loop back off state.
+ // Stability: development
+ K8SContainerStatusReasonCrashLoopBackOff = K8SContainerStatusReasonKey.String("CrashLoopBackOff")
+ // There was an error creating the container configuration.
+ // Stability: development
+ K8SContainerStatusReasonCreateContainerConfigError = K8SContainerStatusReasonKey.String("CreateContainerConfigError")
+ // There was an error pulling the container image.
+ // Stability: development
+ K8SContainerStatusReasonErrImagePull = K8SContainerStatusReasonKey.String("ErrImagePull")
+	// The container image pull is in a back-off state.
+ // Stability: development
+ K8SContainerStatusReasonImagePullBackOff = K8SContainerStatusReasonKey.String("ImagePullBackOff")
+	// The container was killed due to running out of memory.
+ // Stability: development
+ K8SContainerStatusReasonOomKilled = K8SContainerStatusReasonKey.String("OOMKilled")
+ // The container has completed execution.
+ // Stability: development
+ K8SContainerStatusReasonCompleted = K8SContainerStatusReasonKey.String("Completed")
+ // There was an error with the container.
+ // Stability: development
+ K8SContainerStatusReasonError = K8SContainerStatusReasonKey.String("Error")
+ // The container cannot run.
+ // Stability: development
+ K8SContainerStatusReasonContainerCannotRun = K8SContainerStatusReasonKey.String("ContainerCannotRun")
+)
+
+// Enum values for k8s.container.status.state
+var (
+ // The container has terminated.
+ // Stability: development
+ K8SContainerStatusStateTerminated = K8SContainerStatusStateKey.String("terminated")
+ // The container is running.
+ // Stability: development
+ K8SContainerStatusStateRunning = K8SContainerStatusStateKey.String("running")
+ // The container is waiting.
+ // Stability: development
+ K8SContainerStatusStateWaiting = K8SContainerStatusStateKey.String("waiting")
+)
+
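+// The enum values above are ordinary attribute.KeyValue values, so a sketch of
+// describing a waiting container (illustrative usage, not generated code) is:
+//
+//	set := attribute.NewSet(
+//		K8SContainerStatusStateWaiting,
+//		K8SContainerStatusReasonImagePullBackOff,
+//	)
+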
+// Enum values for k8s.namespace.phase
+var (
+ // Active namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseActive = K8SNamespacePhaseKey.String("active")
+ // Terminating namespace phase as described by [K8s API]
+ // Stability: development
+ //
+ // [K8s API]: https://pkg.go.dev/k8s.io/api@v0.31.3/core/v1#NamespacePhase
+ K8SNamespacePhaseTerminating = K8SNamespacePhaseKey.String("terminating")
+)
+
+// Enum values for k8s.node.condition.status
+var (
+ // condition_true
+ // Stability: development
+ K8SNodeConditionStatusConditionTrue = K8SNodeConditionStatusKey.String("true")
+ // condition_false
+ // Stability: development
+ K8SNodeConditionStatusConditionFalse = K8SNodeConditionStatusKey.String("false")
+ // condition_unknown
+ // Stability: development
+ K8SNodeConditionStatusConditionUnknown = K8SNodeConditionStatusKey.String("unknown")
+)
+
+// Enum values for k8s.node.condition.type
+var (
+ // The node is healthy and ready to accept pods
+ // Stability: development
+ K8SNodeConditionTypeReady = K8SNodeConditionTypeKey.String("Ready")
+ // Pressure exists on the disk size—that is, if the disk capacity is low
+ // Stability: development
+ K8SNodeConditionTypeDiskPressure = K8SNodeConditionTypeKey.String("DiskPressure")
+ // Pressure exists on the node memory—that is, if the node memory is low
+ // Stability: development
+ K8SNodeConditionTypeMemoryPressure = K8SNodeConditionTypeKey.String("MemoryPressure")
+ // Pressure exists on the processes—that is, if there are too many processes
+ // on the node
+ // Stability: development
+ K8SNodeConditionTypePIDPressure = K8SNodeConditionTypeKey.String("PIDPressure")
+ // The network for the node is not correctly configured
+ // Stability: development
+ K8SNodeConditionTypeNetworkUnavailable = K8SNodeConditionTypeKey.String("NetworkUnavailable")
+)
+
+// Enum values for k8s.volume.type
+var (
+ // A [persistentVolumeClaim] volume
+ // Stability: development
+ //
+ // [persistentVolumeClaim]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#persistentvolumeclaim
+ K8SVolumeTypePersistentVolumeClaim = K8SVolumeTypeKey.String("persistentVolumeClaim")
+ // A [configMap] volume
+ // Stability: development
+ //
+ // [configMap]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#configmap
+ K8SVolumeTypeConfigMap = K8SVolumeTypeKey.String("configMap")
+ // A [downwardAPI] volume
+ // Stability: development
+ //
+ // [downwardAPI]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#downwardapi
+ K8SVolumeTypeDownwardAPI = K8SVolumeTypeKey.String("downwardAPI")
+ // An [emptyDir] volume
+ // Stability: development
+ //
+ // [emptyDir]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#emptydir
+ K8SVolumeTypeEmptyDir = K8SVolumeTypeKey.String("emptyDir")
+ // A [secret] volume
+ // Stability: development
+ //
+ // [secret]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#secret
+ K8SVolumeTypeSecret = K8SVolumeTypeKey.String("secret")
+ // A [local] volume
+ // Stability: development
+ //
+ // [local]: https://v1-30.docs.kubernetes.io/docs/concepts/storage/volumes/#local
+ K8SVolumeTypeLocal = K8SVolumeTypeKey.String("local")
+)
+
+// Namespace: linux
+const (
+ // LinuxMemorySlabStateKey is the attribute Key conforming to the
+ // "linux.memory.slab.state" semantic conventions. It represents the Linux Slab
+ // memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "reclaimable", "unreclaimable"
+ LinuxMemorySlabStateKey = attribute.Key("linux.memory.slab.state")
+)
+
+// Enum values for linux.memory.slab.state
+var (
+ // reclaimable
+ // Stability: development
+ LinuxMemorySlabStateReclaimable = LinuxMemorySlabStateKey.String("reclaimable")
+ // unreclaimable
+ // Stability: development
+ LinuxMemorySlabStateUnreclaimable = LinuxMemorySlabStateKey.String("unreclaimable")
+)
+
+// Namespace: log
+const (
+ // LogFileNameKey is the attribute Key conforming to the "log.file.name"
+ // semantic conventions. It represents the basename of the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "audit.log"
+ LogFileNameKey = attribute.Key("log.file.name")
+
+ // LogFileNameResolvedKey is the attribute Key conforming to the
+ // "log.file.name_resolved" semantic conventions. It represents the basename of
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "uuid.log"
+ LogFileNameResolvedKey = attribute.Key("log.file.name_resolved")
+
+ // LogFilePathKey is the attribute Key conforming to the "log.file.path"
+ // semantic conventions. It represents the full path to the file.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/log/mysql/audit.log"
+ LogFilePathKey = attribute.Key("log.file.path")
+
+ // LogFilePathResolvedKey is the attribute Key conforming to the
+ // "log.file.path_resolved" semantic conventions. It represents the full path to
+ // the file, with symlinks resolved.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/var/lib/docker/uuid.log"
+ LogFilePathResolvedKey = attribute.Key("log.file.path_resolved")
+
+ // LogIostreamKey is the attribute Key conforming to the "log.iostream" semantic
+ // conventions. It represents the stream associated with the log. See below for
+ // a list of well-known values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ LogIostreamKey = attribute.Key("log.iostream")
+
+ // LogRecordOriginalKey is the attribute Key conforming to the
+ // "log.record.original" semantic conventions. It represents the complete
+ // original Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "77 <86>1 2015-08-06T21:58:59.694Z 192.168.2.133 inactive - - -
+ // Something happened", "[INFO] 8/3/24 12:34:56 Something happened"
+ // Note: This value MAY be added when processing a Log Record which was
+ // originally transmitted as a string or equivalent data type AND the Body field
+ // of the Log Record does not contain the same value. (e.g. a syslog or a log
+ // record read from a file.)
+ LogRecordOriginalKey = attribute.Key("log.record.original")
+
+ // LogRecordUIDKey is the attribute Key conforming to the "log.record.uid"
+ // semantic conventions. It represents a unique identifier for the Log Record.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "01ARZ3NDEKTSV4RRFFQ69G5FAV"
+ // Note: If an id is provided, other log records with the same id will be
+	// considered duplicates and can be removed safely. This means that two
+ // distinguishable log records MUST have different values.
+ // The id MAY be an
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)], but other
+ // identifiers (e.g. UUID) may be used as needed.
+ //
+ // [Universally Unique Lexicographically Sortable Identifier (ULID)]: https://github.com/ulid/spec
+ LogRecordUIDKey = attribute.Key("log.record.uid")
+)
+
+// LogFileName returns an attribute KeyValue conforming to the "log.file.name"
+// semantic conventions. It represents the basename of the file.
+func LogFileName(val string) attribute.KeyValue {
+ return LogFileNameKey.String(val)
+}
+
+// LogFileNameResolved returns an attribute KeyValue conforming to the
+// "log.file.name_resolved" semantic conventions. It represents the basename of
+// the file, with symlinks resolved.
+func LogFileNameResolved(val string) attribute.KeyValue {
+ return LogFileNameResolvedKey.String(val)
+}
+
+// LogFilePath returns an attribute KeyValue conforming to the "log.file.path"
+// semantic conventions. It represents the full path to the file.
+func LogFilePath(val string) attribute.KeyValue {
+ return LogFilePathKey.String(val)
+}
+
+// LogFilePathResolved returns an attribute KeyValue conforming to the
+// "log.file.path_resolved" semantic conventions. It represents the full path to
+// the file, with symlinks resolved.
+func LogFilePathResolved(val string) attribute.KeyValue {
+ return LogFilePathResolvedKey.String(val)
+}
+
+// LogRecordOriginal returns an attribute KeyValue conforming to the
+// "log.record.original" semantic conventions. It represents the complete
+// original Log Record.
+func LogRecordOriginal(val string) attribute.KeyValue {
+ return LogRecordOriginalKey.String(val)
+}
+
+// LogRecordUID returns an attribute KeyValue conforming to the "log.record.uid"
+// semantic conventions. It represents a unique identifier for the Log Record.
+func LogRecordUID(val string) attribute.KeyValue {
+ return LogRecordUIDKey.String(val)
+}
+
+// Enum values for log.iostream
+var (
+ // Logs from stdout stream
+ // Stability: development
+ LogIostreamStdout = LogIostreamKey.String("stdout")
+ // Events from stderr stream
+ // Stability: development
+ LogIostreamStderr = LogIostreamKey.String("stderr")
+)
+
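+// A short sketch (paths are hypothetical) combining the log file attributes and
+// the log.iostream enum defined above:
+//
+//	attrs := []attribute.KeyValue{
+//		LogFileName("audit.log"),
+//		LogFilePath("/var/log/mysql/audit.log"),
+//		LogIostreamStdout,
+//	}
+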
+// Namespace: mainframe
+const (
+ // MainframeLparNameKey is the attribute Key conforming to the
+ // "mainframe.lpar.name" semantic conventions. It represents the name of the
+	// logical partition that hosts systems with a mainframe operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "LPAR01"
+ MainframeLparNameKey = attribute.Key("mainframe.lpar.name")
+)
+
+// MainframeLparName returns an attribute KeyValue conforming to the
+// "mainframe.lpar.name" semantic conventions. It represents the name of the
+// logical partition that hosts systems with a mainframe operating system.
+func MainframeLparName(val string) attribute.KeyValue {
+ return MainframeLparNameKey.String(val)
+}
+
+// Namespace: messaging
+const (
+ // MessagingBatchMessageCountKey is the attribute Key conforming to the
+ // "messaging.batch.message_count" semantic conventions. It represents the
+ // number of messages sent, received, or processed in the scope of the batching
+ // operation.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 0, 1, 2
+ // Note: Instrumentations SHOULD NOT set `messaging.batch.message_count` on
+ // spans that operate with a single message. When a messaging client library
+ // supports both batch and single-message API for the same operation,
+ // instrumentations SHOULD use `messaging.batch.message_count` for batching APIs
+ // and SHOULD NOT use it for single-message APIs.
+ MessagingBatchMessageCountKey = attribute.Key("messaging.batch.message_count")
+
+ // MessagingClientIDKey is the attribute Key conforming to the
+ // "messaging.client.id" semantic conventions. It represents a unique identifier
+ // for the client that consumes or produces a message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "client-5", "myhost@8742@s8083jm"
+ MessagingClientIDKey = attribute.Key("messaging.client.id")
+
+ // MessagingConsumerGroupNameKey is the attribute Key conforming to the
+ // "messaging.consumer.group.name" semantic conventions. It represents the name
+ // of the consumer group with which a consumer is associated.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-group", "indexer"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.consumer.group.name` is applicable and what it means in
+ // the context of that system.
+ MessagingConsumerGroupNameKey = attribute.Key("messaging.consumer.group.name")
+
+ // MessagingDestinationAnonymousKey is the attribute Key conforming to the
+ // "messaging.destination.anonymous" semantic conventions. It represents a
+ // boolean that is true if the message destination is anonymous (could be
+ // unnamed or have auto-generated name).
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationAnonymousKey = attribute.Key("messaging.destination.anonymous")
+
+ // MessagingDestinationNameKey is the attribute Key conforming to the
+ // "messaging.destination.name" semantic conventions. It represents the message
+ // destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MyQueue", "MyTopic"
+ // Note: Destination name SHOULD uniquely identify a specific queue, topic or
+ // other entity within the broker. If
+	// the broker doesn't have such a notion, the destination name SHOULD uniquely
+ // identify the broker.
+ MessagingDestinationNameKey = attribute.Key("messaging.destination.name")
+
+ // MessagingDestinationPartitionIDKey is the attribute Key conforming to the
+ // "messaging.destination.partition.id" semantic conventions. It represents the
+ // identifier of the partition messages are sent to or received from, unique
+ // within the `messaging.destination.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ MessagingDestinationPartitionIDKey = attribute.Key("messaging.destination.partition.id")
+
+ // MessagingDestinationSubscriptionNameKey is the attribute Key conforming to
+ // the "messaging.destination.subscription.name" semantic conventions. It
+ // represents the name of the destination subscription from which a message is
+ // consumed.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "subscription-a"
+ // Note: Semantic conventions for individual messaging systems SHOULD document
+ // whether `messaging.destination.subscription.name` is applicable and what it
+ // means in the context of that system.
+ MessagingDestinationSubscriptionNameKey = attribute.Key("messaging.destination.subscription.name")
+
+ // MessagingDestinationTemplateKey is the attribute Key conforming to the
+ // "messaging.destination.template" semantic conventions. It represents the low
+ // cardinality representation of the messaging destination name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/customers/{customerId}"
+ // Note: Destination names could be constructed from templates. An example would
+ // be a destination name involving a user name or product id. Although the
+ // destination name in this case is of high cardinality, the underlying template
+ // is of low cardinality and can be effectively used for grouping and
+ // aggregation.
+ MessagingDestinationTemplateKey = attribute.Key("messaging.destination.template")
+
+ // MessagingDestinationTemporaryKey is the attribute Key conforming to the
+ // "messaging.destination.temporary" semantic conventions. It represents a
+ // boolean that is true if the message destination is temporary and might not
+ // exist anymore after messages are processed.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingDestinationTemporaryKey = attribute.Key("messaging.destination.temporary")
+
+ // MessagingEventHubsMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingEventHubsMessageEnqueuedTimeKey = attribute.Key("messaging.eventhubs.message.enqueued_time")
+
+ // MessagingGCPPubSubMessageAckDeadlineKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+ // represents the ack deadline in seconds set for the modify ack deadline
+ // request.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageAckDeadlineKey = attribute.Key("messaging.gcp_pubsub.message.ack_deadline")
+
+ // MessagingGCPPubSubMessageAckIDKey is the attribute Key conforming to the
+ // "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+ // ack id for a given message.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ack_id
+ MessagingGCPPubSubMessageAckIDKey = attribute.Key("messaging.gcp_pubsub.message.ack_id")
+
+ // MessagingGCPPubSubMessageDeliveryAttemptKey is the attribute Key conforming
+ // to the "messaging.gcp_pubsub.message.delivery_attempt" semantic conventions.
+ // It represents the delivery attempt for a given message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingGCPPubSubMessageDeliveryAttemptKey = attribute.Key("messaging.gcp_pubsub.message.delivery_attempt")
+
+ // MessagingGCPPubSubMessageOrderingKeyKey is the attribute Key conforming to
+ // the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+ // represents the ordering key for a given message. If the attribute is not
+ // present, the message does not have an ordering key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: ordering_key
+ MessagingGCPPubSubMessageOrderingKeyKey = attribute.Key("messaging.gcp_pubsub.message.ordering_key")
+
+ // MessagingKafkaMessageKeyKey is the attribute Key conforming to the
+	// "messaging.kafka.message.key" semantic conventions. It represents the
+	// message key in Kafka: message keys are used for grouping alike messages to
+	// ensure they're processed on the same partition. They differ from
+	// `messaging.message.id` in that they're not unique. If the key is `null`,
+	// the attribute MUST NOT be set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+	// Note: If the key type is not string, its string representation has to be
+ // supplied for the attribute. If the key has no unambiguous, canonical string
+ // form, don't include its value.
+ MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message.key")
+
+ // MessagingKafkaMessageTombstoneKey is the attribute Key conforming to the
+ // "messaging.kafka.message.tombstone" semantic conventions. It represents a
+ // boolean that is true if the message is a tombstone.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingKafkaMessageTombstoneKey = attribute.Key("messaging.kafka.message.tombstone")
+
+ // MessagingKafkaOffsetKey is the attribute Key conforming to the
+ // "messaging.kafka.offset" semantic conventions. It represents the offset of a
+ // record in the corresponding Kafka partition.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingKafkaOffsetKey = attribute.Key("messaging.kafka.offset")
+
+ // MessagingMessageBodySizeKey is the attribute Key conforming to the
+ // "messaging.message.body.size" semantic conventions. It represents the size of
+ // the message body in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+	// Note: This can refer to either the compressed or uncompressed body size. If
+ // both sizes are known, the uncompressed
+ // body size should be used.
+ MessagingMessageBodySizeKey = attribute.Key("messaging.message.body.size")
+
+ // MessagingMessageConversationIDKey is the attribute Key conforming to the
+ // "messaging.message.conversation_id" semantic conventions. It represents the
+ // conversation ID identifying the conversation to which the message belongs,
+ // represented as a string. Sometimes called "Correlation ID".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: MyConversationId
+ MessagingMessageConversationIDKey = attribute.Key("messaging.message.conversation_id")
+
+ // MessagingMessageEnvelopeSizeKey is the attribute Key conforming to the
+ // "messaging.message.envelope.size" semantic conventions. It represents the
+ // size of the message body and metadata in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+	// Note: This can refer to either the compressed or uncompressed size. If both
+ // sizes are known, the uncompressed
+ // size should be used.
+ MessagingMessageEnvelopeSizeKey = attribute.Key("messaging.message.envelope.size")
+
+ // MessagingMessageIDKey is the attribute Key conforming to the
+ // "messaging.message.id" semantic conventions. It represents a value used by
+ // the messaging system as an identifier for the message, represented as a
+ // string.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 452a7c7c7c7048c2f887f61572b18fc2
+ MessagingMessageIDKey = attribute.Key("messaging.message.id")
+
+ // MessagingOperationNameKey is the attribute Key conforming to the
+ // "messaging.operation.name" semantic conventions. It represents the
+ // system-specific name of the messaging operation.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ack", "nack", "send"
+ MessagingOperationNameKey = attribute.Key("messaging.operation.name")
+
+ // MessagingOperationTypeKey is the attribute Key conforming to the
+ // "messaging.operation.type" semantic conventions. It represents a string
+ // identifying the type of the messaging operation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: If a custom value is used, it MUST be of low cardinality.
+ MessagingOperationTypeKey = attribute.Key("messaging.operation.type")
+
+ // MessagingRabbitMQDestinationRoutingKeyKey is the attribute Key conforming to
+ // the "messaging.rabbitmq.destination.routing_key" semantic conventions. It
+ // represents the rabbitMQ message routing key.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myKey
+ MessagingRabbitMQDestinationRoutingKeyKey = attribute.Key("messaging.rabbitmq.destination.routing_key")
+
+ // MessagingRabbitMQMessageDeliveryTagKey is the attribute Key conforming to the
+ // "messaging.rabbitmq.message.delivery_tag" semantic conventions. It represents
+ // the rabbitMQ message delivery tag.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRabbitMQMessageDeliveryTagKey = attribute.Key("messaging.rabbitmq.message.delivery_tag")
+
+ // MessagingRocketMQConsumptionModelKey is the attribute Key conforming to the
+ // "messaging.rocketmq.consumption_model" semantic conventions. It represents
+ // the model of message consumption. This only applies to consumer spans.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQConsumptionModelKey = attribute.Key("messaging.rocketmq.consumption_model")
+
+ // MessagingRocketMQMessageDelayTimeLevelKey is the attribute Key conforming to
+ // the "messaging.rocketmq.message.delay_time_level" semantic conventions. It
+ // represents the delay time level for delay message, which determines the
+ // message delay time.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDelayTimeLevelKey = attribute.Key("messaging.rocketmq.message.delay_time_level")
+
+ // MessagingRocketMQMessageDeliveryTimestampKey is the attribute Key conforming
+ // to the "messaging.rocketmq.message.delivery_timestamp" semantic conventions.
+ // It represents the timestamp in milliseconds that the delay message is
+ // expected to be delivered to consumer.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingRocketMQMessageDeliveryTimestampKey = attribute.Key("messaging.rocketmq.message.delivery_timestamp")
+
+ // MessagingRocketMQMessageGroupKey is the attribute Key conforming to the
+	// "messaging.rocketmq.message.group" semantic conventions. It represents the
+	// message group, which is essential for FIFO messages. Messages that belong
+	// to the same message group are always processed one by one within the same
+	// consumer group.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myMessageGroup
+ MessagingRocketMQMessageGroupKey = attribute.Key("messaging.rocketmq.message.group")
+
+ // MessagingRocketMQMessageKeysKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.keys" semantic conventions. It represents the
+	// key(s) of the message, another way to mark a message besides the message id.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "keyA", "keyB"
+ MessagingRocketMQMessageKeysKey = attribute.Key("messaging.rocketmq.message.keys")
+
+ // MessagingRocketMQMessageTagKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.tag" semantic conventions. It represents the
+ // secondary classifier of message besides topic.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: tagA
+ MessagingRocketMQMessageTagKey = attribute.Key("messaging.rocketmq.message.tag")
+
+ // MessagingRocketMQMessageTypeKey is the attribute Key conforming to the
+ // "messaging.rocketmq.message.type" semantic conventions. It represents the
+ // type of message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ MessagingRocketMQMessageTypeKey = attribute.Key("messaging.rocketmq.message.type")
+
+ // MessagingRocketMQNamespaceKey is the attribute Key conforming to the
+ // "messaging.rocketmq.namespace" semantic conventions. It represents the
+	// namespace of RocketMQ resources; resources in different namespaces are
+ // individual.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myNamespace
+ MessagingRocketMQNamespaceKey = attribute.Key("messaging.rocketmq.namespace")
+
+ // MessagingServiceBusDispositionStatusKey is the attribute Key conforming to
+ // the "messaging.servicebus.disposition_status" semantic conventions. It
+	// describes the [settlement type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [settlement type]: https://learn.microsoft.com/azure/service-bus-messaging/message-transfers-locks-settlement#peeklock
+ MessagingServiceBusDispositionStatusKey = attribute.Key("messaging.servicebus.disposition_status")
+
+ // MessagingServiceBusMessageDeliveryCountKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.delivery_count" semantic conventions. It
+ // represents the number of deliveries that have been attempted for this
+ // message.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageDeliveryCountKey = attribute.Key("messaging.servicebus.message.delivery_count")
+
+ // MessagingServiceBusMessageEnqueuedTimeKey is the attribute Key conforming to
+ // the "messaging.servicebus.message.enqueued_time" semantic conventions. It
+ // represents the UTC epoch seconds at which the message has been accepted and
+ // stored in the entity.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ MessagingServiceBusMessageEnqueuedTimeKey = attribute.Key("messaging.servicebus.message.enqueued_time")
+
+ // MessagingSystemKey is the attribute Key conforming to the "messaging.system"
+ // semantic conventions. It represents the messaging system as identified by the
+ // client instrumentation.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The actual messaging system may differ from the one known by the
+ // client. For example, when using Kafka client libraries to communicate with
+ // Azure Event Hubs, the `messaging.system` is set to `kafka` based on the
+ // instrumentation's best knowledge.
+ MessagingSystemKey = attribute.Key("messaging.system")
+)
+
+// MessagingBatchMessageCount returns an attribute KeyValue conforming to the
+// "messaging.batch.message_count" semantic conventions. It represents the number
+// of messages sent, received, or processed in the scope of the batching
+// operation.
+func MessagingBatchMessageCount(val int) attribute.KeyValue {
+ return MessagingBatchMessageCountKey.Int(val)
+}
+
+// MessagingClientID returns an attribute KeyValue conforming to the
+// "messaging.client.id" semantic conventions. It represents a unique identifier
+// for the client that consumes or produces a message.
+func MessagingClientID(val string) attribute.KeyValue {
+ return MessagingClientIDKey.String(val)
+}
+
+// MessagingConsumerGroupName returns an attribute KeyValue conforming to the
+// "messaging.consumer.group.name" semantic conventions. It represents the name
+// of the consumer group with which a consumer is associated.
+func MessagingConsumerGroupName(val string) attribute.KeyValue {
+ return MessagingConsumerGroupNameKey.String(val)
+}
+
+// MessagingDestinationAnonymous returns an attribute KeyValue conforming to the
+// "messaging.destination.anonymous" semantic conventions. It represents a
+// boolean that is true if the message destination is anonymous (could be unnamed
+// or have auto-generated name).
+func MessagingDestinationAnonymous(val bool) attribute.KeyValue {
+ return MessagingDestinationAnonymousKey.Bool(val)
+}
+
+// MessagingDestinationName returns an attribute KeyValue conforming to the
+// "messaging.destination.name" semantic conventions. It represents the message
+// destination name.
+func MessagingDestinationName(val string) attribute.KeyValue {
+ return MessagingDestinationNameKey.String(val)
+}
+
+// MessagingDestinationPartitionID returns an attribute KeyValue conforming to
+// the "messaging.destination.partition.id" semantic conventions. It represents
+// the identifier of the partition messages are sent to or received from, unique
+// within the `messaging.destination.name`.
+func MessagingDestinationPartitionID(val string) attribute.KeyValue {
+ return MessagingDestinationPartitionIDKey.String(val)
+}
+
+// MessagingDestinationSubscriptionName returns an attribute KeyValue conforming
+// to the "messaging.destination.subscription.name" semantic conventions. It
+// represents the name of the destination subscription from which a message is
+// consumed.
+func MessagingDestinationSubscriptionName(val string) attribute.KeyValue {
+ return MessagingDestinationSubscriptionNameKey.String(val)
+}
+
+// MessagingDestinationTemplate returns an attribute KeyValue conforming to the
+// "messaging.destination.template" semantic conventions. It represents the low
+// cardinality representation of the messaging destination name.
+func MessagingDestinationTemplate(val string) attribute.KeyValue {
+ return MessagingDestinationTemplateKey.String(val)
+}
+
+// MessagingDestinationTemporary returns an attribute KeyValue conforming to the
+// "messaging.destination.temporary" semantic conventions. It represents a
+// boolean that is true if the message destination is temporary and might not
+// exist anymore after messages are processed.
+func MessagingDestinationTemporary(val bool) attribute.KeyValue {
+ return MessagingDestinationTemporaryKey.Bool(val)
+}
+
+// MessagingEventHubsMessageEnqueuedTime returns an attribute KeyValue conforming
+// to the "messaging.eventhubs.message.enqueued_time" semantic conventions. It
+// represents the UTC epoch seconds at which the message has been accepted and
+// stored in the entity.
+func MessagingEventHubsMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingEventHubsMessageEnqueuedTimeKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckDeadline returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ack_deadline" semantic conventions. It
+// represents the ack deadline in seconds set for the modify ack deadline
+// request.
+func MessagingGCPPubSubMessageAckDeadline(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckDeadlineKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageAckID returns an attribute KeyValue conforming to the
+// "messaging.gcp_pubsub.message.ack_id" semantic conventions. It represents the
+// ack id for a given message.
+func MessagingGCPPubSubMessageAckID(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageAckIDKey.String(val)
+}
+
+// MessagingGCPPubSubMessageDeliveryAttempt returns an attribute KeyValue
+// conforming to the "messaging.gcp_pubsub.message.delivery_attempt" semantic
+// conventions. It represents the delivery attempt for a given message.
+func MessagingGCPPubSubMessageDeliveryAttempt(val int) attribute.KeyValue {
+ return MessagingGCPPubSubMessageDeliveryAttemptKey.Int(val)
+}
+
+// MessagingGCPPubSubMessageOrderingKey returns an attribute KeyValue conforming
+// to the "messaging.gcp_pubsub.message.ordering_key" semantic conventions. It
+// represents the ordering key for a given message. If the attribute is not
+// present, the message does not have an ordering key.
+func MessagingGCPPubSubMessageOrderingKey(val string) attribute.KeyValue {
+ return MessagingGCPPubSubMessageOrderingKeyKey.String(val)
+}
+
+// MessagingKafkaMessageKey returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.key" semantic conventions. It represents the message
+// keys in Kafka are used for grouping alike messages to ensure they're processed
+// on the same partition. They differ from `messaging.message.id` in that they're
+// not unique. If the key is `null`, the attribute MUST NOT be set.
+func MessagingKafkaMessageKey(val string) attribute.KeyValue {
+ return MessagingKafkaMessageKeyKey.String(val)
+}
+
+// MessagingKafkaMessageTombstone returns an attribute KeyValue conforming to the
+// "messaging.kafka.message.tombstone" semantic conventions. It represents a
+// boolean that is true if the message is a tombstone.
+func MessagingKafkaMessageTombstone(val bool) attribute.KeyValue {
+ return MessagingKafkaMessageTombstoneKey.Bool(val)
+}
+
+// MessagingKafkaOffset returns an attribute KeyValue conforming to the
+// "messaging.kafka.offset" semantic conventions. It represents the offset of a
+// record in the corresponding Kafka partition.
+func MessagingKafkaOffset(val int) attribute.KeyValue {
+ return MessagingKafkaOffsetKey.Int(val)
+}
+
+// MessagingMessageBodySize returns an attribute KeyValue conforming to the
+// "messaging.message.body.size" semantic conventions. It represents the size of
+// the message body in bytes.
+func MessagingMessageBodySize(val int) attribute.KeyValue {
+ return MessagingMessageBodySizeKey.Int(val)
+}
+
+// MessagingMessageConversationID returns an attribute KeyValue conforming to the
+// "messaging.message.conversation_id" semantic conventions. It represents the
+// conversation ID identifying the conversation to which the message belongs,
+// represented as a string. Sometimes called "Correlation ID".
+func MessagingMessageConversationID(val string) attribute.KeyValue {
+ return MessagingMessageConversationIDKey.String(val)
+}
+
+// MessagingMessageEnvelopeSize returns an attribute KeyValue conforming to the
+// "messaging.message.envelope.size" semantic conventions. It represents the size
+// of the message body and metadata in bytes.
+func MessagingMessageEnvelopeSize(val int) attribute.KeyValue {
+ return MessagingMessageEnvelopeSizeKey.Int(val)
+}
+
+// MessagingMessageID returns an attribute KeyValue conforming to the
+// "messaging.message.id" semantic conventions. It represents a value used by the
+// messaging system as an identifier for the message, represented as a string.
+func MessagingMessageID(val string) attribute.KeyValue {
+ return MessagingMessageIDKey.String(val)
+}
+
+// MessagingOperationName returns an attribute KeyValue conforming to the
+// "messaging.operation.name" semantic conventions. It represents the
+// system-specific name of the messaging operation.
+func MessagingOperationName(val string) attribute.KeyValue {
+ return MessagingOperationNameKey.String(val)
+}
+
+// MessagingRabbitMQDestinationRoutingKey returns an attribute KeyValue
+// conforming to the "messaging.rabbitmq.destination.routing_key" semantic
+// conventions. It represents the rabbitMQ message routing key.
+func MessagingRabbitMQDestinationRoutingKey(val string) attribute.KeyValue {
+ return MessagingRabbitMQDestinationRoutingKeyKey.String(val)
+}
+
+// MessagingRabbitMQMessageDeliveryTag returns an attribute KeyValue conforming
+// to the "messaging.rabbitmq.message.delivery_tag" semantic conventions. It
+// represents the rabbitMQ message delivery tag.
+func MessagingRabbitMQMessageDeliveryTag(val int) attribute.KeyValue {
+ return MessagingRabbitMQMessageDeliveryTagKey.Int(val)
+}
+
+// MessagingRocketMQMessageDelayTimeLevel returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delay_time_level" semantic
+// conventions. It represents the delay time level for delay message, which
+// determines the message delay time.
+func MessagingRocketMQMessageDelayTimeLevel(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDelayTimeLevelKey.Int(val)
+}
+
+// MessagingRocketMQMessageDeliveryTimestamp returns an attribute KeyValue
+// conforming to the "messaging.rocketmq.message.delivery_timestamp" semantic
+// conventions. It represents the timestamp in milliseconds that the delay
+// message is expected to be delivered to consumer.
+func MessagingRocketMQMessageDeliveryTimestamp(val int) attribute.KeyValue {
+ return MessagingRocketMQMessageDeliveryTimestampKey.Int(val)
+}
+
+// MessagingRocketMQMessageGroup returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.group" semantic conventions. It represents the it
+// is essential for FIFO message. Messages that belong to the same message group
+// are always processed one by one within the same consumer group.
+func MessagingRocketMQMessageGroup(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageGroupKey.String(val)
+}
+
+// MessagingRocketMQMessageKeys returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.keys" semantic conventions. It represents the
+// key(s) of the message, another way to mark a message besides the message id.
+func MessagingRocketMQMessageKeys(val ...string) attribute.KeyValue {
+ return MessagingRocketMQMessageKeysKey.StringSlice(val)
+}
+
+// MessagingRocketMQMessageTag returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.message.tag" semantic conventions. It represents the
+// secondary classifier of message besides topic.
+func MessagingRocketMQMessageTag(val string) attribute.KeyValue {
+ return MessagingRocketMQMessageTagKey.String(val)
+}
+
+// MessagingRocketMQNamespace returns an attribute KeyValue conforming to the
+// "messaging.rocketmq.namespace" semantic conventions. It represents the
+// namespace of RocketMQ resources; resources in different namespaces are
+// individual.
+func MessagingRocketMQNamespace(val string) attribute.KeyValue {
+ return MessagingRocketMQNamespaceKey.String(val)
+}
+
+// MessagingServiceBusMessageDeliveryCount returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.delivery_count" semantic
+// conventions. It represents the number of deliveries that have been attempted
+// for this message.
+func MessagingServiceBusMessageDeliveryCount(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageDeliveryCountKey.Int(val)
+}
+
+// MessagingServiceBusMessageEnqueuedTime returns an attribute KeyValue
+// conforming to the "messaging.servicebus.message.enqueued_time" semantic
+// conventions. It represents the UTC epoch seconds at which the message has been
+// accepted and stored in the entity.
+func MessagingServiceBusMessageEnqueuedTime(val int) attribute.KeyValue {
+ return MessagingServiceBusMessageEnqueuedTimeKey.Int(val)
+}
+
+// Enum values for messaging.operation.type
+var (
+ // A message is created. "Create" spans always refer to a single message and are
+ // used to provide a unique creation context for messages in batch sending
+ // scenarios.
+ //
+ // Stability: development
+ MessagingOperationTypeCreate = MessagingOperationTypeKey.String("create")
+ // One or more messages are provided for sending to an intermediary. If a single
+ // message is sent, the context of the "Send" span can be used as the creation
+ // context and no "Create" span needs to be created.
+ //
+ // Stability: development
+ MessagingOperationTypeSend = MessagingOperationTypeKey.String("send")
+ // One or more messages are requested by a consumer. This operation refers to
+ // pull-based scenarios, where consumers explicitly call methods of messaging
+ // SDKs to receive messages.
+ //
+ // Stability: development
+ MessagingOperationTypeReceive = MessagingOperationTypeKey.String("receive")
+ // One or more messages are processed by a consumer.
+ //
+ // Stability: development
+ MessagingOperationTypeProcess = MessagingOperationTypeKey.String("process")
+ // One or more messages are settled.
+ //
+ // Stability: development
+ MessagingOperationTypeSettle = MessagingOperationTypeKey.String("settle")
+)
+
+// Enum values for messaging.rocketmq.consumption_model
+var (
+ // Clustering consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelClustering = MessagingRocketMQConsumptionModelKey.String("clustering")
+ // Broadcasting consumption model
+ // Stability: development
+ MessagingRocketMQConsumptionModelBroadcasting = MessagingRocketMQConsumptionModelKey.String("broadcasting")
+)
+
+// Enum values for messaging.rocketmq.message.type
+var (
+ // Normal message
+ // Stability: development
+ MessagingRocketMQMessageTypeNormal = MessagingRocketMQMessageTypeKey.String("normal")
+ // FIFO message
+ // Stability: development
+ MessagingRocketMQMessageTypeFifo = MessagingRocketMQMessageTypeKey.String("fifo")
+ // Delay message
+ // Stability: development
+ MessagingRocketMQMessageTypeDelay = MessagingRocketMQMessageTypeKey.String("delay")
+ // Transaction message
+ // Stability: development
+ MessagingRocketMQMessageTypeTransaction = MessagingRocketMQMessageTypeKey.String("transaction")
+)
+
+// Enum values for messaging.servicebus.disposition_status
+var (
+ // Message is completed
+ // Stability: development
+ MessagingServiceBusDispositionStatusComplete = MessagingServiceBusDispositionStatusKey.String("complete")
+ // Message is abandoned
+ // Stability: development
+ MessagingServiceBusDispositionStatusAbandon = MessagingServiceBusDispositionStatusKey.String("abandon")
+ // Message is sent to dead letter queue
+ // Stability: development
+ MessagingServiceBusDispositionStatusDeadLetter = MessagingServiceBusDispositionStatusKey.String("dead_letter")
+ // Message is deferred
+ // Stability: development
+ MessagingServiceBusDispositionStatusDefer = MessagingServiceBusDispositionStatusKey.String("defer")
+)
+
+// Enum values for messaging.system
+var (
+ // Apache ActiveMQ
+ // Stability: development
+ MessagingSystemActiveMQ = MessagingSystemKey.String("activemq")
+ // Amazon Simple Notification Service (SNS)
+ // Stability: development
+ MessagingSystemAWSSNS = MessagingSystemKey.String("aws.sns")
+ // Amazon Simple Queue Service (SQS)
+ // Stability: development
+ MessagingSystemAWSSQS = MessagingSystemKey.String("aws_sqs")
+ // Azure Event Grid
+ // Stability: development
+ MessagingSystemEventGrid = MessagingSystemKey.String("eventgrid")
+ // Azure Event Hubs
+ // Stability: development
+ MessagingSystemEventHubs = MessagingSystemKey.String("eventhubs")
+ // Azure Service Bus
+ // Stability: development
+ MessagingSystemServiceBus = MessagingSystemKey.String("servicebus")
+ // Google Cloud Pub/Sub
+ // Stability: development
+ MessagingSystemGCPPubSub = MessagingSystemKey.String("gcp_pubsub")
+ // Java Message Service
+ // Stability: development
+ MessagingSystemJMS = MessagingSystemKey.String("jms")
+ // Apache Kafka
+ // Stability: development
+ MessagingSystemKafka = MessagingSystemKey.String("kafka")
+ // RabbitMQ
+ // Stability: development
+ MessagingSystemRabbitMQ = MessagingSystemKey.String("rabbitmq")
+ // Apache RocketMQ
+ // Stability: development
+ MessagingSystemRocketMQ = MessagingSystemKey.String("rocketmq")
+ // Apache Pulsar
+ // Stability: development
+ MessagingSystemPulsar = MessagingSystemKey.String("pulsar")
+)
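+
+// Illustrative usage (a sketch, not part of the generated conventions): the
+// enum values above are plain attribute.KeyValue constants. Assuming this
+// package is imported as semconv and span is a trace.Span obtained from
+// go.opentelemetry.io/otel/trace, they could be set like any other attribute:
+//
+//	span.SetAttributes(
+//		semconv.MessagingSystemKafka,
+//		semconv.MessagingOperationTypeSend,
+//	)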
+
+// Namespace: network
+const (
+ // NetworkCarrierICCKey is the attribute Key conforming to the
+ // "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+ // alpha-2 2-character country code associated with the mobile carrier network.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: DE
+ NetworkCarrierICCKey = attribute.Key("network.carrier.icc")
+
+ // NetworkCarrierMCCKey is the attribute Key conforming to the
+ // "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+ // country code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 310
+ NetworkCarrierMCCKey = attribute.Key("network.carrier.mcc")
+
+ // NetworkCarrierMNCKey is the attribute Key conforming to the
+ // "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+ // network code.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 001
+ NetworkCarrierMNCKey = attribute.Key("network.carrier.mnc")
+
+ // NetworkCarrierNameKey is the attribute Key conforming to the
+ // "network.carrier.name" semantic conventions. It represents the name of the
+ // mobile carrier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: sprint
+ NetworkCarrierNameKey = attribute.Key("network.carrier.name")
+
+ // NetworkConnectionStateKey is the attribute Key conforming to the
+ // "network.connection.state" semantic conventions. It represents the state of
+ // network connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "close_wait"
+ // Note: Connection states are defined as part of the [rfc9293]
+ //
+ // [rfc9293]: https://datatracker.ietf.org/doc/html/rfc9293#section-3.3.2
+ NetworkConnectionStateKey = attribute.Key("network.connection.state")
+
+ // NetworkConnectionSubtypeKey is the attribute Key conforming to the
+ // "network.connection.subtype" semantic conventions. It represents more
+ // details regarding the connection.type. It may be the type of cell
+ // technology connection, but it could be used for describing details about
+ // a wifi connection.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: LTE
+ NetworkConnectionSubtypeKey = attribute.Key("network.connection.subtype")
+
+ // NetworkConnectionTypeKey is the attribute Key conforming to the
+ // "network.connection.type" semantic conventions. It represents the internet
+ // connection type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: wifi
+ NetworkConnectionTypeKey = attribute.Key("network.connection.type")
+
+ // NetworkInterfaceNameKey is the attribute Key conforming to the
+ // "network.interface.name" semantic conventions. It represents the network
+ // interface name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "lo", "eth0"
+ NetworkInterfaceNameKey = attribute.Key("network.interface.name")
+
+ // NetworkIODirectionKey is the attribute Key conforming to the
+ // "network.io.direction" semantic conventions. It represents the network IO
+ // operation direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "transmit"
+ NetworkIODirectionKey = attribute.Key("network.io.direction")
+
+ // NetworkLocalAddressKey is the attribute Key conforming to the
+ // "network.local.address" semantic conventions. It represents the local address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkLocalAddressKey = attribute.Key("network.local.address")
+
+ // NetworkLocalPortKey is the attribute Key conforming to the
+ // "network.local.port" semantic conventions. It represents the local port
+ // number of the network connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkLocalPortKey = attribute.Key("network.local.port")
+
+ // NetworkPeerAddressKey is the attribute Key conforming to the
+ // "network.peer.address" semantic conventions. It represents the peer address
+ // of the network connection - IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "10.1.2.80", "/tmp/my.sock"
+ NetworkPeerAddressKey = attribute.Key("network.peer.address")
+
+ // NetworkPeerPortKey is the attribute Key conforming to the "network.peer.port"
+ // semantic conventions. It represents the peer port number of the network
+ // connection.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 65123
+ NetworkPeerPortKey = attribute.Key("network.peer.port")
+
+ // NetworkProtocolNameKey is the attribute Key conforming to the
+ // "network.protocol.name" semantic conventions. It represents the
+ // [OSI application layer] or non-OSI equivalent.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "amqp", "http", "mqtt"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+ NetworkProtocolNameKey = attribute.Key("network.protocol.name")
+
+ // NetworkProtocolVersionKey is the attribute Key conforming to the
+ // "network.protocol.version" semantic conventions. It represents the actual
+ // version of the protocol used for network communication.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.1", "2"
+ // Note: If protocol version is subject to negotiation (for example using [ALPN]
+ // ), this attribute SHOULD be set to the negotiated version. If the actual
+ // protocol version is not known, this attribute SHOULD NOT be set.
+ //
+ // [ALPN]: https://www.rfc-editor.org/rfc/rfc7301.html
+ NetworkProtocolVersionKey = attribute.Key("network.protocol.version")
+
+ // NetworkTransportKey is the attribute Key conforming to the
+ // "network.transport" semantic conventions. It represents the
+ // [OSI transport layer] or [inter-process communication method].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "tcp", "udp"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // Consider always setting the transport when setting a port number, since
+ // a port number is ambiguous without knowing the transport. For example
+ // different processes could be listening on TCP port 12345 and UDP port 12345.
+ //
+ // [OSI transport layer]: https://wikipedia.org/wiki/Transport_layer
+ // [inter-process communication method]: https://wikipedia.org/wiki/Inter-process_communication
+ NetworkTransportKey = attribute.Key("network.transport")
+
+ // NetworkTypeKey is the attribute Key conforming to the "network.type" semantic
+ // conventions. It represents the [OSI network layer] or non-OSI equivalent.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "ipv4", "ipv6"
+ // Note: The value SHOULD be normalized to lowercase.
+ //
+ // [OSI network layer]: https://wikipedia.org/wiki/Network_layer
+ NetworkTypeKey = attribute.Key("network.type")
+)
+
+// NetworkCarrierICC returns an attribute KeyValue conforming to the
+// "network.carrier.icc" semantic conventions. It represents the ISO 3166-1
+// alpha-2 2-character country code associated with the mobile carrier network.
+func NetworkCarrierICC(val string) attribute.KeyValue {
+ return NetworkCarrierICCKey.String(val)
+}
+
+// NetworkCarrierMCC returns an attribute KeyValue conforming to the
+// "network.carrier.mcc" semantic conventions. It represents the mobile carrier
+// country code.
+func NetworkCarrierMCC(val string) attribute.KeyValue {
+ return NetworkCarrierMCCKey.String(val)
+}
+
+// NetworkCarrierMNC returns an attribute KeyValue conforming to the
+// "network.carrier.mnc" semantic conventions. It represents the mobile carrier
+// network code.
+func NetworkCarrierMNC(val string) attribute.KeyValue {
+ return NetworkCarrierMNCKey.String(val)
+}
+
+// NetworkCarrierName returns an attribute KeyValue conforming to the
+// "network.carrier.name" semantic conventions. It represents the name of the
+// mobile carrier.
+func NetworkCarrierName(val string) attribute.KeyValue {
+ return NetworkCarrierNameKey.String(val)
+}
+
+// NetworkInterfaceName returns an attribute KeyValue conforming to the
+// "network.interface.name" semantic conventions. It represents the network
+// interface name.
+func NetworkInterfaceName(val string) attribute.KeyValue {
+ return NetworkInterfaceNameKey.String(val)
+}
+
+// NetworkLocalAddress returns an attribute KeyValue conforming to the
+// "network.local.address" semantic conventions. It represents the local address
+// of the network connection - IP address or Unix domain socket name.
+func NetworkLocalAddress(val string) attribute.KeyValue {
+ return NetworkLocalAddressKey.String(val)
+}
+
+// NetworkLocalPort returns an attribute KeyValue conforming to the
+// "network.local.port" semantic conventions. It represents the local port number
+// of the network connection.
+func NetworkLocalPort(val int) attribute.KeyValue {
+ return NetworkLocalPortKey.Int(val)
+}
+
+// NetworkPeerAddress returns an attribute KeyValue conforming to the
+// "network.peer.address" semantic conventions. It represents the peer address of
+// the network connection - IP address or Unix domain socket name.
+func NetworkPeerAddress(val string) attribute.KeyValue {
+ return NetworkPeerAddressKey.String(val)
+}
+
+// NetworkPeerPort returns an attribute KeyValue conforming to the
+// "network.peer.port" semantic conventions. It represents the peer port number
+// of the network connection.
+func NetworkPeerPort(val int) attribute.KeyValue {
+ return NetworkPeerPortKey.Int(val)
+}
+
+// NetworkProtocolName returns an attribute KeyValue conforming to the
+// "network.protocol.name" semantic conventions. It represents the
+// [OSI application layer] or non-OSI equivalent.
+//
+// [OSI application layer]: https://wikipedia.org/wiki/Application_layer
+func NetworkProtocolName(val string) attribute.KeyValue {
+ return NetworkProtocolNameKey.String(val)
+}
+
+// NetworkProtocolVersion returns an attribute KeyValue conforming to the
+// "network.protocol.version" semantic conventions. It represents the actual
+// version of the protocol used for network communication.
+func NetworkProtocolVersion(val string) attribute.KeyValue {
+ return NetworkProtocolVersionKey.String(val)
+}
+
+// Enum values for network.connection.state
+var (
+ // closed
+ // Stability: development
+ NetworkConnectionStateClosed = NetworkConnectionStateKey.String("closed")
+ // close_wait
+ // Stability: development
+ NetworkConnectionStateCloseWait = NetworkConnectionStateKey.String("close_wait")
+ // closing
+ // Stability: development
+ NetworkConnectionStateClosing = NetworkConnectionStateKey.String("closing")
+ // established
+ // Stability: development
+ NetworkConnectionStateEstablished = NetworkConnectionStateKey.String("established")
+ // fin_wait_1
+ // Stability: development
+ NetworkConnectionStateFinWait1 = NetworkConnectionStateKey.String("fin_wait_1")
+ // fin_wait_2
+ // Stability: development
+ NetworkConnectionStateFinWait2 = NetworkConnectionStateKey.String("fin_wait_2")
+ // last_ack
+ // Stability: development
+ NetworkConnectionStateLastAck = NetworkConnectionStateKey.String("last_ack")
+ // listen
+ // Stability: development
+ NetworkConnectionStateListen = NetworkConnectionStateKey.String("listen")
+ // syn_received
+ // Stability: development
+ NetworkConnectionStateSynReceived = NetworkConnectionStateKey.String("syn_received")
+ // syn_sent
+ // Stability: development
+ NetworkConnectionStateSynSent = NetworkConnectionStateKey.String("syn_sent")
+ // time_wait
+ // Stability: development
+ NetworkConnectionStateTimeWait = NetworkConnectionStateKey.String("time_wait")
+)
+
+// Enum values for network.connection.subtype
+var (
+ // GPRS
+ // Stability: development
+ NetworkConnectionSubtypeGprs = NetworkConnectionSubtypeKey.String("gprs")
+ // EDGE
+ // Stability: development
+ NetworkConnectionSubtypeEdge = NetworkConnectionSubtypeKey.String("edge")
+ // UMTS
+ // Stability: development
+ NetworkConnectionSubtypeUmts = NetworkConnectionSubtypeKey.String("umts")
+ // CDMA
+ // Stability: development
+ NetworkConnectionSubtypeCdma = NetworkConnectionSubtypeKey.String("cdma")
+ // EVDO Rel. 0
+ // Stability: development
+ NetworkConnectionSubtypeEvdo0 = NetworkConnectionSubtypeKey.String("evdo_0")
+ // EVDO Rev. A
+ // Stability: development
+ NetworkConnectionSubtypeEvdoA = NetworkConnectionSubtypeKey.String("evdo_a")
+ // CDMA2000 1XRTT
+ // Stability: development
+ NetworkConnectionSubtypeCdma20001xrtt = NetworkConnectionSubtypeKey.String("cdma2000_1xrtt")
+ // HSDPA
+ // Stability: development
+ NetworkConnectionSubtypeHsdpa = NetworkConnectionSubtypeKey.String("hsdpa")
+ // HSUPA
+ // Stability: development
+ NetworkConnectionSubtypeHsupa = NetworkConnectionSubtypeKey.String("hsupa")
+ // HSPA
+ // Stability: development
+ NetworkConnectionSubtypeHspa = NetworkConnectionSubtypeKey.String("hspa")
+ // IDEN
+ // Stability: development
+ NetworkConnectionSubtypeIden = NetworkConnectionSubtypeKey.String("iden")
+ // EVDO Rev. B
+ // Stability: development
+ NetworkConnectionSubtypeEvdoB = NetworkConnectionSubtypeKey.String("evdo_b")
+ // LTE
+ // Stability: development
+ NetworkConnectionSubtypeLte = NetworkConnectionSubtypeKey.String("lte")
+ // EHRPD
+ // Stability: development
+ NetworkConnectionSubtypeEhrpd = NetworkConnectionSubtypeKey.String("ehrpd")
+ // HSPAP
+ // Stability: development
+ NetworkConnectionSubtypeHspap = NetworkConnectionSubtypeKey.String("hspap")
+ // GSM
+ // Stability: development
+ NetworkConnectionSubtypeGsm = NetworkConnectionSubtypeKey.String("gsm")
+ // TD-SCDMA
+ // Stability: development
+ NetworkConnectionSubtypeTdScdma = NetworkConnectionSubtypeKey.String("td_scdma")
+ // IWLAN
+ // Stability: development
+ NetworkConnectionSubtypeIwlan = NetworkConnectionSubtypeKey.String("iwlan")
+ // 5G NR (New Radio)
+ // Stability: development
+ NetworkConnectionSubtypeNr = NetworkConnectionSubtypeKey.String("nr")
+ // 5G NRNSA (New Radio Non-Standalone)
+ // Stability: development
+ NetworkConnectionSubtypeNrnsa = NetworkConnectionSubtypeKey.String("nrnsa")
+ // LTE CA
+ // Stability: development
+ NetworkConnectionSubtypeLteCa = NetworkConnectionSubtypeKey.String("lte_ca")
+)
+
+// Enum values for network.connection.type
+var (
+ // wifi
+ // Stability: development
+ NetworkConnectionTypeWifi = NetworkConnectionTypeKey.String("wifi")
+ // wired
+ // Stability: development
+ NetworkConnectionTypeWired = NetworkConnectionTypeKey.String("wired")
+ // cell
+ // Stability: development
+ NetworkConnectionTypeCell = NetworkConnectionTypeKey.String("cell")
+ // unavailable
+ // Stability: development
+ NetworkConnectionTypeUnavailable = NetworkConnectionTypeKey.String("unavailable")
+ // unknown
+ // Stability: development
+ NetworkConnectionTypeUnknown = NetworkConnectionTypeKey.String("unknown")
+)
+
+// Enum values for network.io.direction
+var (
+ // transmit
+ // Stability: development
+ NetworkIODirectionTransmit = NetworkIODirectionKey.String("transmit")
+ // receive
+ // Stability: development
+ NetworkIODirectionReceive = NetworkIODirectionKey.String("receive")
+)
+
+// Enum values for network.transport
+var (
+ // TCP
+ // Stability: stable
+ NetworkTransportTCP = NetworkTransportKey.String("tcp")
+ // UDP
+ // Stability: stable
+ NetworkTransportUDP = NetworkTransportKey.String("udp")
+ // Named or anonymous pipe.
+ // Stability: stable
+ NetworkTransportPipe = NetworkTransportKey.String("pipe")
+ // Unix domain socket
+ // Stability: stable
+ NetworkTransportUnix = NetworkTransportKey.String("unix")
+ // QUIC
+ // Stability: stable
+ NetworkTransportQUIC = NetworkTransportKey.String("quic")
+)
+
+// Enum values for network.type
+var (
+ // IPv4
+ // Stability: stable
+ NetworkTypeIPv4 = NetworkTypeKey.String("ipv4")
+ // IPv6
+ // Stability: stable
+ NetworkTypeIPv6 = NetworkTypeKey.String("ipv6")
+)
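+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// combining the network helper functions and enum values on a span. The span
+// variable and the semconv import alias are assumptions for the example:
+//
+//	span.SetAttributes(
+//		semconv.NetworkTransportTCP,
+//		semconv.NetworkTypeIPv4,
+//		semconv.NetworkPeerAddress("10.1.2.80"),
+//		semconv.NetworkPeerPort(65123),
+//	)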
+
+// Namespace: oci
+const (
+ // OCIManifestDigestKey is the attribute Key conforming to the
+ // "oci.manifest.digest" semantic conventions. It represents the digest of the
+ // OCI image manifest. For container images specifically is the digest by which
+ // OCI image manifest. For container images specifically, this is the digest
+ // by which the container image is known.
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"
+ // Note: Follows [OCI Image Manifest Specification], and specifically the
+ // [Digest property].
+ // An example can be found in [Example Image Manifest].
+ //
+ // [OCI Image Manifest Specification]: https://github.com/opencontainers/image-spec/blob/main/manifest.md
+ // [Digest property]: https://github.com/opencontainers/image-spec/blob/main/descriptor.md#digests
+ // [Example Image Manifest]: https://github.com/opencontainers/image-spec/blob/main/manifest.md#example-image-manifest
+ OCIManifestDigestKey = attribute.Key("oci.manifest.digest")
+)
+
+// OCIManifestDigest returns an attribute KeyValue conforming to the
+// "oci.manifest.digest" semantic conventions. It represents the digest of the
+// OCI image manifest. For container images specifically, this is the digest by
+// which the container image is known.
+func OCIManifestDigest(val string) attribute.KeyValue {
+ return OCIManifestDigestKey.String(val)
+}
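+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// oci.manifest.digest is typically recorded as a resource attribute. Assuming
+// go.opentelemetry.io/otel/sdk/resource is imported as resource and ctx is a
+// context.Context, it might be supplied as:
+//
+//	res, _ := resource.New(ctx,
+//		resource.WithAttributes(
+//			semconv.OCIManifestDigest("sha256:e4ca62c0d62f3e886e684806dfe9d4e0cda60d54986898173c1083856cfda0f4"),
+//		),
+//	)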
+
+// Namespace: openai
+const (
+ // OpenAIRequestServiceTierKey is the attribute Key conforming to the
+ // "openai.request.service_tier" semantic conventions. It represents the service
+ // tier requested. May be a specific tier, default, or auto.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "auto", "default"
+ OpenAIRequestServiceTierKey = attribute.Key("openai.request.service_tier")
+
+ // OpenAIResponseServiceTierKey is the attribute Key conforming to the
+ // "openai.response.service_tier" semantic conventions. It represents the
+ // service tier used for the response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "scale", "default"
+ OpenAIResponseServiceTierKey = attribute.Key("openai.response.service_tier")
+
+ // OpenAIResponseSystemFingerprintKey is the attribute Key conforming to the
+ // "openai.response.system_fingerprint" semantic conventions. It represents a
+ // fingerprint to track any eventual change in the Generative AI environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "fp_44709d6fcb"
+ OpenAIResponseSystemFingerprintKey = attribute.Key("openai.response.system_fingerprint")
+)
+
+// OpenAIResponseServiceTier returns an attribute KeyValue conforming to the
+// "openai.response.service_tier" semantic conventions. It represents the service
+// tier used for the response.
+func OpenAIResponseServiceTier(val string) attribute.KeyValue {
+ return OpenAIResponseServiceTierKey.String(val)
+}
+
+// OpenAIResponseSystemFingerprint returns an attribute KeyValue conforming to
+// the "openai.response.system_fingerprint" semantic conventions. It represents a
+// fingerprint to track any eventual change in the Generative AI environment.
+func OpenAIResponseSystemFingerprint(val string) attribute.KeyValue {
+ return OpenAIResponseSystemFingerprintKey.String(val)
+}
+
+// Enum values for openai.request.service_tier
+var (
+ // The system will utilize scale tier credits until they are exhausted.
+ // Stability: development
+ OpenAIRequestServiceTierAuto = OpenAIRequestServiceTierKey.String("auto")
+ // The system will utilize the default scale tier.
+ // Stability: development
+ OpenAIRequestServiceTierDefault = OpenAIRequestServiceTierKey.String("default")
+)
+
+// Namespace: opentracing
+const (
+ // OpenTracingRefTypeKey is the attribute Key conforming to the
+ // "opentracing.ref_type" semantic conventions. It represents the parent-child
+ // Reference type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: The causal relationship between a child Span and a parent Span.
+ OpenTracingRefTypeKey = attribute.Key("opentracing.ref_type")
+)
+
+// Enum values for opentracing.ref_type
+var (
+ // The parent Span depends on the child Span in some capacity
+ // Stability: development
+ OpenTracingRefTypeChildOf = OpenTracingRefTypeKey.String("child_of")
+ // The parent Span doesn't depend in any way on the result of the child Span
+ // Stability: development
+ OpenTracingRefTypeFollowsFrom = OpenTracingRefTypeKey.String("follows_from")
+)
+
+// Namespace: os
+const (
+ // OSBuildIDKey is the attribute Key conforming to the "os.build_id" semantic
+ // conventions. It represents the unique identifier for a particular build or
+ // compilation of the operating system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TQ3C.230805.001.B2", "20E247", "22621"
+ OSBuildIDKey = attribute.Key("os.build_id")
+
+ // OSDescriptionKey is the attribute Key conforming to the "os.description"
+ // semantic conventions. It represents the human readable (not intended to be
+ // parsed) OS version information, like e.g. reported by `ver` or
+ // `lsb_release -a` commands.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Microsoft Windows [Version 10.0.18363.778]", "Ubuntu 18.04.1 LTS"
+ OSDescriptionKey = attribute.Key("os.description")
+
+ // OSNameKey is the attribute Key conforming to the "os.name" semantic
+ // conventions. It represents the human readable operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ OSNameKey = attribute.Key("os.name")
+
+ // OSTypeKey is the attribute Key conforming to the "os.type" semantic
+ // conventions. It represents the operating system type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OSTypeKey = attribute.Key("os.type")
+
+ // OSVersionKey is the attribute Key conforming to the "os.version" semantic
+ // conventions. It represents the version string of the operating system as
+ // defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ OSVersionKey = attribute.Key("os.version")
+)
+
+// OSBuildID returns an attribute KeyValue conforming to the "os.build_id"
+// semantic conventions. It represents the unique identifier for a particular
+// build or compilation of the operating system.
+func OSBuildID(val string) attribute.KeyValue {
+ return OSBuildIDKey.String(val)
+}
+
+// OSDescription returns an attribute KeyValue conforming to the "os.description"
+// semantic conventions. It represents the human readable (not intended to be
+// parsed) OS version information, like e.g. reported by `ver` or
+// `lsb_release -a` commands.
+func OSDescription(val string) attribute.KeyValue {
+ return OSDescriptionKey.String(val)
+}
+
+// OSName returns an attribute KeyValue conforming to the "os.name" semantic
+// conventions. It represents the human readable operating system name.
+func OSName(val string) attribute.KeyValue {
+ return OSNameKey.String(val)
+}
+
+// OSVersion returns an attribute KeyValue conforming to the "os.version"
+// semantic conventions. It represents the version string of the operating system
+// as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func OSVersion(val string) attribute.KeyValue {
+ return OSVersionKey.String(val)
+}
+
+// Enum values for os.type
+var (
+ // Microsoft Windows
+ // Stability: development
+ OSTypeWindows = OSTypeKey.String("windows")
+ // Linux
+ // Stability: development
+ OSTypeLinux = OSTypeKey.String("linux")
+ // Apple Darwin
+ // Stability: development
+ OSTypeDarwin = OSTypeKey.String("darwin")
+ // FreeBSD
+ // Stability: development
+ OSTypeFreeBSD = OSTypeKey.String("freebsd")
+ // NetBSD
+ // Stability: development
+ OSTypeNetBSD = OSTypeKey.String("netbsd")
+ // OpenBSD
+ // Stability: development
+ OSTypeOpenBSD = OSTypeKey.String("openbsd")
+ // DragonFly BSD
+ // Stability: development
+ OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd")
+ // HP-UX (Hewlett Packard Unix)
+ // Stability: development
+ OSTypeHPUX = OSTypeKey.String("hpux")
+ // AIX (Advanced Interactive eXecutive)
+ // Stability: development
+ OSTypeAIX = OSTypeKey.String("aix")
+ // SunOS, Oracle Solaris
+ // Stability: development
+ OSTypeSolaris = OSTypeKey.String("solaris")
+ // IBM z/OS
+ // Stability: development
+ OSTypeZOS = OSTypeKey.String("zos")
+)
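+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// mapping the standard library's runtime.GOOS onto the os.type enum values
+// above. The switch is deliberately incomplete and only an assumption about
+// how a caller might choose a value:
+//
+//	var osType attribute.KeyValue
+//	switch runtime.GOOS {
+//	case "linux":
+//		osType = semconv.OSTypeLinux
+//	case "darwin":
+//		osType = semconv.OSTypeDarwin
+//	case "windows":
+//		osType = semconv.OSTypeWindows
+//	}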
+
+// Namespace: otel
+const (
+ // OTelComponentNameKey is the attribute Key conforming to the
+ // "otel.component.name" semantic conventions. It represents a name uniquely
+ // identifying the instance of the OpenTelemetry component within its containing
+ // SDK instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otlp_grpc_span_exporter/0", "custom-name"
+ // Note: Implementations SHOULD ensure a low cardinality for this attribute,
+ // even across application or SDK restarts.
+ // E.g. implementations MUST NOT use UUIDs as values for this attribute.
+ //
+ // Implementations MAY achieve these goals by following a
+ // `<otel.component.type>/<instance-counter>` pattern, e.g.
+ // `batching_span_processor/0`.
+ // Hereby `otel.component.type` refers to the corresponding attribute value of
+ // the component.
+ //
+ // The value of `instance-counter` MAY be automatically assigned by the
+ // component and uniqueness within the enclosing SDK instance MUST be
+ // guaranteed.
+ // For example, `<instance-counter>` MAY be implemented by using a monotonically
+ // increasing counter (starting with `0`), which is incremented every time an
+ // instance of the given component type is started.
+ //
+ // With this implementation, for example the first Batching Span Processor would
+ // have `batching_span_processor/0`
+ // as `otel.component.name`, the second one `batching_span_processor/1` and so
+ // on.
+ // These values will therefore be reused in the case of an application restart.
+ OTelComponentNameKey = attribute.Key("otel.component.name")
+
+ // OTelComponentTypeKey is the attribute Key conforming to the
+ // "otel.component.type" semantic conventions. It represents a name identifying
+ // the type of the OpenTelemetry component.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "batching_span_processor", "com.example.MySpanExporter"
+ // Note: If none of the standardized values apply, implementations SHOULD use
+ // the language-defined name of the type.
+ // E.g. for Java the fully qualified classname SHOULD be used in this case.
+ OTelComponentTypeKey = attribute.Key("otel.component.type")
+
+ // OTelScopeNameKey is the attribute Key conforming to the "otel.scope.name"
+ // semantic conventions. It represents the name of the instrumentation scope - (
+ // `InstrumentationScope.Name` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "io.opentelemetry.contrib.mongodb"
+ OTelScopeNameKey = attribute.Key("otel.scope.name")
+
+ // OTelScopeSchemaURLKey is the attribute Key conforming to the
+ // "otel.scope.schema_url" semantic conventions. It represents the schema URL of
+ // the instrumentation scope.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://opentelemetry.io/schemas/1.31.0"
+ OTelScopeSchemaURLKey = attribute.Key("otel.scope.schema_url")
+
+ // OTelScopeVersionKey is the attribute Key conforming to the
+ // "otel.scope.version" semantic conventions. It represents the version of the
+ // instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.0.0"
+ OTelScopeVersionKey = attribute.Key("otel.scope.version")
+
+ // OTelSpanParentOriginKey is the attribute Key conforming to the
+ // "otel.span.parent.origin" semantic conventions. It represents whether the
+ // span has a parent span, and if so,
+ // [whether it is a remote parent].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginKey = attribute.Key("otel.span.parent.origin")
+
+ // OTelSpanSamplingResultKey is the attribute Key conforming to the
+ // "otel.span.sampling_result" semantic conventions. It represents the result
+ // value of the sampler for this span.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ OTelSpanSamplingResultKey = attribute.Key("otel.span.sampling_result")
+
+ // OTelStatusCodeKey is the attribute Key conforming to the "otel.status_code"
+ // semantic conventions. It represents the name of the code, either "OK" or
+ // "ERROR". MUST NOT be set if the status code is UNSET.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ OTelStatusCodeKey = attribute.Key("otel.status_code")
+
+ // OTelStatusDescriptionKey is the attribute Key conforming to the
+ // "otel.status_description" semantic conventions. It represents the description
+ // of the Status if it has a value, otherwise not set.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "resource not found"
+ OTelStatusDescriptionKey = attribute.Key("otel.status_description")
+)
+
+// OTelComponentName returns an attribute KeyValue conforming to the
+// "otel.component.name" semantic conventions. It represents a name uniquely
+// identifying the instance of the OpenTelemetry component within its containing
+// SDK instance.
+func OTelComponentName(val string) attribute.KeyValue {
+ return OTelComponentNameKey.String(val)
+}
+
+// OTelScopeName returns an attribute KeyValue conforming to the
+// "otel.scope.name" semantic conventions. It represents the name of the
+// instrumentation scope - (`InstrumentationScope.Name` in OTLP).
+func OTelScopeName(val string) attribute.KeyValue {
+ return OTelScopeNameKey.String(val)
+}
+
+// OTelScopeSchemaURL returns an attribute KeyValue conforming to the
+// "otel.scope.schema_url" semantic conventions. It represents the schema URL of
+// the instrumentation scope.
+func OTelScopeSchemaURL(val string) attribute.KeyValue {
+ return OTelScopeSchemaURLKey.String(val)
+}
+
+// OTelScopeVersion returns an attribute KeyValue conforming to the
+// "otel.scope.version" semantic conventions. It represents the version of the
+// instrumentation scope - (`InstrumentationScope.Version` in OTLP).
+func OTelScopeVersion(val string) attribute.KeyValue {
+ return OTelScopeVersionKey.String(val)
+}
+
+// OTelStatusDescription returns an attribute KeyValue conforming to the
+// "otel.status_description" semantic conventions. It represents the description
+// of the Status if it has a value, otherwise not set.
+func OTelStatusDescription(val string) attribute.KeyValue {
+ return OTelStatusDescriptionKey.String(val)
+}
+
+// Enum values for otel.component.type
+var (
+ // The builtin SDK batching span processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingSpanProcessor = OTelComponentTypeKey.String("batching_span_processor")
+ // The builtin SDK simple span processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleSpanProcessor = OTelComponentTypeKey.String("simple_span_processor")
+ // The builtin SDK batching log record processor
+ //
+ // Stability: development
+ OTelComponentTypeBatchingLogProcessor = OTelComponentTypeKey.String("batching_log_processor")
+ // The builtin SDK simple log record processor
+ //
+ // Stability: development
+ OTelComponentTypeSimpleLogProcessor = OTelComponentTypeKey.String("simple_log_processor")
+ // OTLP span exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCSpanExporter = OTelComponentTypeKey.String("otlp_grpc_span_exporter")
+ // OTLP span exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPSpanExporter = OTelComponentTypeKey.String("otlp_http_span_exporter")
+ // OTLP span exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONSpanExporter = OTelComponentTypeKey.String("otlp_http_json_span_exporter")
+ // Zipkin span exporter over HTTP
+ //
+ // Stability: development
+ OTelComponentTypeZipkinHTTPSpanExporter = OTelComponentTypeKey.String("zipkin_http_span_exporter")
+ // OTLP log record exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCLogExporter = OTelComponentTypeKey.String("otlp_grpc_log_exporter")
+ // OTLP log record exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPLogExporter = OTelComponentTypeKey.String("otlp_http_log_exporter")
+ // OTLP log record exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONLogExporter = OTelComponentTypeKey.String("otlp_http_json_log_exporter")
+ // The builtin SDK periodically exporting metric reader
+ //
+ // Stability: development
+ OTelComponentTypePeriodicMetricReader = OTelComponentTypeKey.String("periodic_metric_reader")
+ // OTLP metric exporter over gRPC with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpGRPCMetricExporter = OTelComponentTypeKey.String("otlp_grpc_metric_exporter")
+ // OTLP metric exporter over HTTP with protobuf serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPMetricExporter = OTelComponentTypeKey.String("otlp_http_metric_exporter")
+ // OTLP metric exporter over HTTP with JSON serialization
+ //
+ // Stability: development
+ OTelComponentTypeOtlpHTTPJSONMetricExporter = OTelComponentTypeKey.String("otlp_http_json_metric_exporter")
+ // Prometheus metric exporter over HTTP with the default text-based format
+ //
+ // Stability: development
+ OTelComponentTypePrometheusHTTPTextMetricExporter = OTelComponentTypeKey.String("prometheus_http_text_metric_exporter")
+)
+
+// Enum values for otel.span.parent.origin
+var (
+ // The span does not have a parent, it is a root span
+ // Stability: development
+ OTelSpanParentOriginNone = OTelSpanParentOriginKey.String("none")
+ // The span has a parent and the parent's span context [isRemote()] is false
+ // Stability: development
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginLocal = OTelSpanParentOriginKey.String("local")
+ // The span has a parent and the parent's span context [isRemote()] is true
+ // Stability: development
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ OTelSpanParentOriginRemote = OTelSpanParentOriginKey.String("remote")
+)
+
+// Enum values for otel.span.sampling_result
+var (
+ // The span is not sampled and not recording
+ // Stability: development
+ OTelSpanSamplingResultDrop = OTelSpanSamplingResultKey.String("DROP")
+ // The span is not sampled, but recording
+ // Stability: development
+ OTelSpanSamplingResultRecordOnly = OTelSpanSamplingResultKey.String("RECORD_ONLY")
+ // The span is sampled and recording
+ // Stability: development
+ OTelSpanSamplingResultRecordAndSample = OTelSpanSamplingResultKey.String("RECORD_AND_SAMPLE")
+)
+
+// Enum values for otel.status_code
+var (
+ // The operation has been validated by an Application developer or Operator to
+ // have completed successfully.
+ // Stability: stable
+ OTelStatusCodeOk = OTelStatusCodeKey.String("OK")
+ // The operation contains an error.
+ // Stability: stable
+ OTelStatusCodeError = OTelStatusCodeKey.String("ERROR")
+)
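+
+// Illustrative usage (a sketch, not part of the generated conventions): the
+// otel.component.* attributes target SDK self-observability signals. Assuming
+// counter is an Int64Counter from go.opentelemetry.io/otel/metric and ctx is a
+// context.Context, an exporter could tag its own metrics like:
+//
+//	counter.Add(ctx, 1, metric.WithAttributes(
+//		semconv.OTelComponentTypeOtlpGRPCSpanExporter,
+//		semconv.OTelComponentName("otlp_grpc_span_exporter/0"),
+//	))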
+
+// Namespace: peer
+const (
+ // PeerServiceKey is the attribute Key conforming to the "peer.service" semantic
+ // conventions. It represents the [`service.name`] of the remote service. SHOULD
+ // be equal to the actual `service.name` resource attribute of the remote
+ // service if any.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: AuthTokenCache
+ //
+ // [`service.name`]: /docs/resource/README.md#service
+ PeerServiceKey = attribute.Key("peer.service")
+)
+
+// PeerService returns an attribute KeyValue conforming to the "peer.service"
+// semantic conventions. It represents the [`service.name`] of the remote
+// service. SHOULD be equal to the actual `service.name` resource attribute of
+// the remote service if any.
+//
+// [`service.name`]: /docs/resource/README.md#service
+func PeerService(val string) attribute.KeyValue {
+ return PeerServiceKey.String(val)
+}
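+
+// Illustrative usage (a sketch, not part of the generated conventions):
+// peer.service is set by the caller when the remote service name is known out
+// of band. The span variable and semconv import alias are assumptions:
+//
+//	span.SetAttributes(semconv.PeerService("AuthTokenCache"))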
+
+// Namespace: process
+const (
+ // ProcessArgsCountKey is the attribute Key conforming to the
+ // "process.args_count" semantic conventions. It represents the length of the
+ // process.command_args array.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 4
+ // Note: This field can be useful for querying or performing bucket analysis on
+ // how many arguments were provided to start a process. More arguments may be an
+ // indication of suspicious activity.
+ ProcessArgsCountKey = attribute.Key("process.args_count")
+
+ // ProcessCommandKey is the attribute Key conforming to the "process.command"
+ // semantic conventions. It represents the command used to launch the process
+ // (i.e. the command name). On Linux based systems, can be set to the zeroth
+ // string in `proc/[pid]/cmdline`. On Windows, can be set to the first parameter
+ // extracted from `GetCommandLineW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otelcol"
+ ProcessCommandKey = attribute.Key("process.command")
+
+ // ProcessCommandArgsKey is the attribute Key conforming to the
+ // "process.command_args" semantic conventions. It represents all the
+ // command arguments (including the command/executable itself) as received by
+ // the process. On Linux-based systems (and some other Unixoid systems
+ // supporting procfs), can be set according to the list of null-delimited
+ // strings extracted from `proc/[pid]/cmdline`. For libc-based executables, this
+ // would be the full argv vector passed to `main`. SHOULD NOT be collected by
+ // default unless there is sanitization that excludes sensitive data.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cmd/otecol", "--config=config.yaml"
+ ProcessCommandArgsKey = attribute.Key("process.command_args")
+
+ // ProcessCommandLineKey is the attribute Key conforming to the
+ // "process.command_line" semantic conventions. It represents the full command
+ // used to launch the process as a single string representing the full command.
+ // On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+ // you have to assemble it just for monitoring; use `process.command_args`
+ // instead. SHOULD NOT be collected by default unless there is sanitization that
+ // excludes sensitive data.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "C:\cmd\otecol --config="my directory\config.yaml""
+ ProcessCommandLineKey = attribute.Key("process.command_line")
+
+ // ProcessContextSwitchTypeKey is the attribute Key conforming to the
+ // "process.context_switch_type" semantic conventions. It represents whether
+ // the context switches for this data point were voluntary or involuntary.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessContextSwitchTypeKey = attribute.Key("process.context_switch_type")
+
+ // ProcessCreationTimeKey is the attribute Key conforming to the
+ // "process.creation.time" semantic conventions. It represents the date and time
+ // the process was created, in ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:25:34.853Z"
+ ProcessCreationTimeKey = attribute.Key("process.creation.time")
+
+ // ProcessExecutableBuildIDGNUKey is the attribute Key conforming to the
+ // "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+ // build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "c89b11207f6479603b0d49bf291c092c2b719293"
+ ProcessExecutableBuildIDGNUKey = attribute.Key("process.executable.build_id.gnu")
+
+ // ProcessExecutableBuildIDGoKey is the attribute Key conforming to the
+ // "process.executable.build_id.go" semantic conventions. It represents the Go
+ // build ID as retrieved by `go tool buildid <go executable>`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "foh3mEXu7BLZjsN9pOwG/kATcXlYVCDEFouRMQed_/WwRFB1hPo9LBkekthSPG/x8hMC8emW2cCjXD0_1aY"
+ ProcessExecutableBuildIDGoKey = attribute.Key("process.executable.build_id.go")
+
+ // ProcessExecutableBuildIDHtlhashKey is the attribute Key conforming to the
+ // "process.executable.build_id.htlhash" semantic conventions. It represents the
+ // profiling specific build ID for executables. See the OTel specification for
+ // Profiles for more information.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "600DCAFE4A110000F2BF38C493F5FB92"
+ ProcessExecutableBuildIDHtlhashKey = attribute.Key("process.executable.build_id.htlhash")
+
+ // ProcessExecutableNameKey is the attribute Key conforming to the
+ // "process.executable.name" semantic conventions. It represents the name of the
+ // process executable. On Linux based systems, this SHOULD be set to the base
+ // name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to
+ // the base name of `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "otelcol"
+ ProcessExecutableNameKey = attribute.Key("process.executable.name")
+
+ // ProcessExecutablePathKey is the attribute Key conforming to the
+ // "process.executable.path" semantic conventions. It represents the full path
+ // to the process executable. On Linux based systems, can be set to the target
+ // of `proc/[pid]/exe`. On Windows, can be set to the result of
+ // `GetProcessImageFileNameW`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/usr/bin/cmd/otelcol"
+ ProcessExecutablePathKey = attribute.Key("process.executable.path")
+
+ // ProcessExitCodeKey is the attribute Key conforming to the "process.exit.code"
+ // semantic conventions. It represents the exit code of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 127
+ ProcessExitCodeKey = attribute.Key("process.exit.code")
+
+ // ProcessExitTimeKey is the attribute Key conforming to the "process.exit.time"
+ // semantic conventions. It represents the date and time the process exited, in
+ // ISO 8601 format.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2023-11-21T09:26:12.315Z"
+ ProcessExitTimeKey = attribute.Key("process.exit.time")
+
+ // ProcessGroupLeaderPIDKey is the attribute Key conforming to the
+ // "process.group_leader.pid" semantic conventions. It represents the PID of the
+ // process's group leader. This is also the process group ID (PGID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 23
+ ProcessGroupLeaderPIDKey = attribute.Key("process.group_leader.pid")
+
+ // ProcessInteractiveKey is the attribute Key conforming to the
+ // "process.interactive" semantic conventions. It represents whether the
+ // process is connected to an interactive shell.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessInteractiveKey = attribute.Key("process.interactive")
+
+ // ProcessLinuxCgroupKey is the attribute Key conforming to the
+ // "process.linux.cgroup" semantic conventions. It represents the control group
+ // associated with the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1:name=systemd:/user.slice/user-1000.slice/session-3.scope",
+ // "0::/user.slice/user-1000.slice/user@1000.service/tmux-spawn-0267755b-4639-4a27-90ed-f19f88e53748.scope"
+ // Note: Control groups (cgroups) are a kernel feature used to organize and
+ // manage process resources. This attribute provides the path(s) to the
+ // cgroup(s) associated with the process, which should match the contents of the
+ // [/proc/[PID]/cgroup] file.
+ //
+ // [/proc/[PID]/cgroup]: https://man7.org/linux/man-pages/man7/cgroups.7.html
+ ProcessLinuxCgroupKey = attribute.Key("process.linux.cgroup")
+
+ // ProcessOwnerKey is the attribute Key conforming to the "process.owner"
+ // semantic conventions. It represents the username of the user that owns the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessOwnerKey = attribute.Key("process.owner")
+
+ // ProcessPagingFaultTypeKey is the attribute Key conforming to the
+ // "process.paging.fault_type" semantic conventions. It represents the type of
+ // page fault for this data point. Type `major` is for major/hard page faults,
+ // and `minor` is for minor/soft page faults.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ ProcessPagingFaultTypeKey = attribute.Key("process.paging.fault_type")
+
+ // ProcessParentPIDKey is the attribute Key conforming to the
+ // "process.parent_pid" semantic conventions. It represents the parent Process
+ // identifier (PPID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 111
+ ProcessParentPIDKey = attribute.Key("process.parent_pid")
+
+ // ProcessPIDKey is the attribute Key conforming to the "process.pid" semantic
+ // conventions. It represents the process identifier (PID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1234
+ ProcessPIDKey = attribute.Key("process.pid")
+
+ // ProcessRealUserIDKey is the attribute Key conforming to the
+ // "process.real_user.id" semantic conventions. It represents the real user ID
+ // (RUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1000
+ ProcessRealUserIDKey = attribute.Key("process.real_user.id")
+
+ // ProcessRealUserNameKey is the attribute Key conforming to the
+ // "process.real_user.name" semantic conventions. It represents the username of
+ // the real user of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessRealUserNameKey = attribute.Key("process.real_user.name")
+
+ // ProcessRuntimeDescriptionKey is the attribute Key conforming to the
+ // "process.runtime.description" semantic conventions. It represents an
+ // additional description about the runtime of the process, for example a
+ // specific vendor customization of the runtime environment.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0
+ ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description")
+
+ // ProcessRuntimeNameKey is the attribute Key conforming to the
+ // "process.runtime.name" semantic conventions. It represents the name of the
+ // runtime of this process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "OpenJDK Runtime Environment"
+ ProcessRuntimeNameKey = attribute.Key("process.runtime.name")
+
+ // ProcessRuntimeVersionKey is the attribute Key conforming to the
+ // "process.runtime.version" semantic conventions. It represents the version of
+ // the runtime of this process, as returned by the runtime without modification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14.0.2
+ ProcessRuntimeVersionKey = attribute.Key("process.runtime.version")
+
+ // ProcessSavedUserIDKey is the attribute Key conforming to the
+ // "process.saved_user.id" semantic conventions. It represents the saved user ID
+ // (SUID) of the process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1002
+ ProcessSavedUserIDKey = attribute.Key("process.saved_user.id")
+
+ // ProcessSavedUserNameKey is the attribute Key conforming to the
+ // "process.saved_user.name" semantic conventions. It represents the username of
+ // the saved user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "operator"
+ ProcessSavedUserNameKey = attribute.Key("process.saved_user.name")
+
+ // ProcessSessionLeaderPIDKey is the attribute Key conforming to the
+ // "process.session_leader.pid" semantic conventions. It represents the PID of
+ // the process's session leader. This is also the session ID (SID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 14
+ ProcessSessionLeaderPIDKey = attribute.Key("process.session_leader.pid")
+
+ // ProcessTitleKey is the attribute Key conforming to the "process.title"
+ // semantic conventions. It represents the process title (proctitle).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cat /etc/hostname", "xfce4-session", "bash"
+ // Note: In many Unix-like systems, process title (proctitle), is the string
+ // that represents the name or command line of a running process, displayed by
+ // system monitoring tools like ps, top, and htop.
+ ProcessTitleKey = attribute.Key("process.title")
+
+ // ProcessUserIDKey is the attribute Key conforming to the "process.user.id"
+ // semantic conventions. It represents the effective user ID (EUID) of the
+ // process.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1001
+ ProcessUserIDKey = attribute.Key("process.user.id")
+
+ // ProcessUserNameKey is the attribute Key conforming to the "process.user.name"
+ // semantic conventions. It represents the username of the effective user of the
+ // process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "root"
+ ProcessUserNameKey = attribute.Key("process.user.name")
+
+ // ProcessVpidKey is the attribute Key conforming to the "process.vpid" semantic
+ // conventions. It represents the virtual process identifier.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 12
+ // Note: The process ID within a PID namespace. This is not necessarily unique
+ // across all processes on the host but it is unique within the process
+ // namespace that the process exists within.
+ ProcessVpidKey = attribute.Key("process.vpid")
+
+ // ProcessWorkingDirectoryKey is the attribute Key conforming to the
+ // "process.working_directory" semantic conventions. It represents the working
+ // directory of the process.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/root"
+ ProcessWorkingDirectoryKey = attribute.Key("process.working_directory")
+)
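+
+// Illustrative usage (a sketch, not part of the generated conventions): the
+// process.* attributes usually describe the reporting process as resource
+// attributes. Using the helpers defined below, and assuming the standard
+// library os and path/filepath packages plus the semconv import alias:
+//
+//	attrs := []attribute.KeyValue{
+//		semconv.ProcessPID(os.Getpid()),
+//		semconv.ProcessExecutableName(filepath.Base(os.Args[0])),
+//	}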
+
+// ProcessArgsCount returns an attribute KeyValue conforming to the
+// "process.args_count" semantic conventions. It represents the length of the
+// process.command_args array.
+func ProcessArgsCount(val int) attribute.KeyValue {
+ return ProcessArgsCountKey.Int(val)
+}
+
+// ProcessCommand returns an attribute KeyValue conforming to the
+// "process.command" semantic conventions. It represents the command used to
+// launch the process (i.e. the command name). On Linux based systems, can be set
+// to the zeroth string in `proc/[pid]/cmdline`. On Windows, can be set to the
+// first parameter extracted from `GetCommandLineW`.
+func ProcessCommand(val string) attribute.KeyValue {
+ return ProcessCommandKey.String(val)
+}
+
+// ProcessCommandArgs returns an attribute KeyValue conforming to the
+// "process.command_args" semantic conventions. It represents the all the command
+// arguments (including the command/executable itself) as received by the
+// process. On Linux-based systems (and some other Unixoid systems supporting
+// procfs), can be set according to the list of null-delimited strings extracted
+// from `proc/[pid]/cmdline`. For libc-based executables, this would be the full
+// argv vector passed to `main`. SHOULD NOT be collected by default unless there
+// is sanitization that excludes sensitive data.
+func ProcessCommandArgs(val ...string) attribute.KeyValue {
+ return ProcessCommandArgsKey.StringSlice(val)
+}
+
+// ProcessCommandLine returns an attribute KeyValue conforming to the
+// "process.command_line" semantic conventions. It represents the full command
+// used to launch the process as a single string representing the full command.
+// On Windows, can be set to the result of `GetCommandLineW`. Do not set this if
+// you have to assemble it just for monitoring; use `process.command_args`
+// instead. SHOULD NOT be collected by default unless there is sanitization that
+// excludes sensitive data.
+func ProcessCommandLine(val string) attribute.KeyValue {
+ return ProcessCommandLineKey.String(val)
+}
+
+// ProcessCreationTime returns an attribute KeyValue conforming to the
+// "process.creation.time" semantic conventions. It represents the date and time
+// the process was created, in ISO 8601 format.
+func ProcessCreationTime(val string) attribute.KeyValue {
+ return ProcessCreationTimeKey.String(val)
+}
+
+// ProcessEnvironmentVariable returns an attribute KeyValue conforming to the
+// "process.environment_variable" semantic conventions. It represents the process
+// environment variables, `<key>` being the environment variable name, the value
+// being the environment variable value.
+func ProcessEnvironmentVariable(key string, val string) attribute.KeyValue {
+ return attribute.String("process.environment_variable."+key, val)
+}
+
+// ProcessExecutableBuildIDGNU returns an attribute KeyValue conforming to the
+// "process.executable.build_id.gnu" semantic conventions. It represents the GNU
+// build ID as found in the `.note.gnu.build-id` ELF section (hex string).
+func ProcessExecutableBuildIDGNU(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGNUKey.String(val)
+}
+
+// ProcessExecutableBuildIDGo returns an attribute KeyValue conforming to the
+// "process.executable.build_id.go" semantic conventions. It represents the Go
+// build ID as retrieved by `go tool buildid <go executable>`.
+func ProcessExecutableBuildIDGo(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDGoKey.String(val)
+}
+
+// ProcessExecutableBuildIDHtlhash returns an attribute KeyValue conforming to
+// the "process.executable.build_id.htlhash" semantic conventions. It represents
+// the profiling specific build ID for executables. See the OTel specification
+// for Profiles for more information.
+func ProcessExecutableBuildIDHtlhash(val string) attribute.KeyValue {
+ return ProcessExecutableBuildIDHtlhashKey.String(val)
+}
+
+// ProcessExecutableName returns an attribute KeyValue conforming to the
+// "process.executable.name" semantic conventions. It represents the name of the
+// process executable. On Linux based systems, this SHOULD be set to the base
+// name of the target of `/proc/[pid]/exe`. On Windows, this SHOULD be set to the
+// base name of `GetProcessImageFileNameW`.
+func ProcessExecutableName(val string) attribute.KeyValue {
+ return ProcessExecutableNameKey.String(val)
+}
+
+// ProcessExecutablePath returns an attribute KeyValue conforming to the
+// "process.executable.path" semantic conventions. It represents the full path to
+// the process executable. On Linux based systems, can be set to the target of
+// `proc/[pid]/exe`. On Windows, can be set to the result of
+// `GetProcessImageFileNameW`.
+func ProcessExecutablePath(val string) attribute.KeyValue {
+ return ProcessExecutablePathKey.String(val)
+}
+
+// ProcessExitCode returns an attribute KeyValue conforming to the
+// "process.exit.code" semantic conventions. It represents the exit code of the
+// process.
+func ProcessExitCode(val int) attribute.KeyValue {
+ return ProcessExitCodeKey.Int(val)
+}
+
+// ProcessExitTime returns an attribute KeyValue conforming to the
+// "process.exit.time" semantic conventions. It represents the date and time the
+// process exited, in ISO 8601 format.
+func ProcessExitTime(val string) attribute.KeyValue {
+ return ProcessExitTimeKey.String(val)
+}
+
+// ProcessGroupLeaderPID returns an attribute KeyValue conforming to the
+// "process.group_leader.pid" semantic conventions. It represents the PID of the
+// process's group leader. This is also the process group ID (PGID) of the
+// process.
+func ProcessGroupLeaderPID(val int) attribute.KeyValue {
+ return ProcessGroupLeaderPIDKey.Int(val)
+}
+
+// ProcessInteractive returns an attribute KeyValue conforming to the
+// "process.interactive" semantic conventions. It represents the whether the
+// process is connected to an interactive shell.
+func ProcessInteractive(val bool) attribute.KeyValue {
+ return ProcessInteractiveKey.Bool(val)
+}
+
+// ProcessLinuxCgroup returns an attribute KeyValue conforming to the
+// "process.linux.cgroup" semantic conventions. It represents the control group
+// associated with the process.
+func ProcessLinuxCgroup(val string) attribute.KeyValue {
+ return ProcessLinuxCgroupKey.String(val)
+}
+
+// ProcessOwner returns an attribute KeyValue conforming to the "process.owner"
+// semantic conventions. It represents the username of the user that owns the
+// process.
+func ProcessOwner(val string) attribute.KeyValue {
+ return ProcessOwnerKey.String(val)
+}
+
+// ProcessParentPID returns an attribute KeyValue conforming to the
+// "process.parent_pid" semantic conventions. It represents the parent Process
+// identifier (PPID).
+func ProcessParentPID(val int) attribute.KeyValue {
+ return ProcessParentPIDKey.Int(val)
+}
+
+// ProcessPID returns an attribute KeyValue conforming to the "process.pid"
+// semantic conventions. It represents the process identifier (PID).
+func ProcessPID(val int) attribute.KeyValue {
+ return ProcessPIDKey.Int(val)
+}
+
+// ProcessRealUserID returns an attribute KeyValue conforming to the
+// "process.real_user.id" semantic conventions. It represents the real user ID
+// (RUID) of the process.
+func ProcessRealUserID(val int) attribute.KeyValue {
+ return ProcessRealUserIDKey.Int(val)
+}
+
+// ProcessRealUserName returns an attribute KeyValue conforming to the
+// "process.real_user.name" semantic conventions. It represents the username of
+// the real user of the process.
+func ProcessRealUserName(val string) attribute.KeyValue {
+ return ProcessRealUserNameKey.String(val)
+}
+
+// ProcessRuntimeDescription returns an attribute KeyValue conforming to the
+// "process.runtime.description" semantic conventions. It represents an
+// additional description about the runtime of the process, for example a
+// specific vendor customization of the runtime environment.
+func ProcessRuntimeDescription(val string) attribute.KeyValue {
+ return ProcessRuntimeDescriptionKey.String(val)
+}
+
+// ProcessRuntimeName returns an attribute KeyValue conforming to the
+// "process.runtime.name" semantic conventions. It represents the name of the
+// runtime of this process.
+func ProcessRuntimeName(val string) attribute.KeyValue {
+ return ProcessRuntimeNameKey.String(val)
+}
+
+// ProcessRuntimeVersion returns an attribute KeyValue conforming to the
+// "process.runtime.version" semantic conventions. It represents the version of
+// the runtime of this process, as returned by the runtime without modification.
+func ProcessRuntimeVersion(val string) attribute.KeyValue {
+ return ProcessRuntimeVersionKey.String(val)
+}
+
+// ProcessSavedUserID returns an attribute KeyValue conforming to the
+// "process.saved_user.id" semantic conventions. It represents the saved user ID
+// (SUID) of the process.
+func ProcessSavedUserID(val int) attribute.KeyValue {
+ return ProcessSavedUserIDKey.Int(val)
+}
+
+// ProcessSavedUserName returns an attribute KeyValue conforming to the
+// "process.saved_user.name" semantic conventions. It represents the username of
+// the saved user.
+func ProcessSavedUserName(val string) attribute.KeyValue {
+ return ProcessSavedUserNameKey.String(val)
+}
+
+// ProcessSessionLeaderPID returns an attribute KeyValue conforming to the
+// "process.session_leader.pid" semantic conventions. It represents the PID of
+// the process's session leader. This is also the session ID (SID) of the
+// process.
+func ProcessSessionLeaderPID(val int) attribute.KeyValue {
+ return ProcessSessionLeaderPIDKey.Int(val)
+}
+
+// ProcessTitle returns an attribute KeyValue conforming to the "process.title"
+// semantic conventions. It represents the process title (proctitle).
+func ProcessTitle(val string) attribute.KeyValue {
+ return ProcessTitleKey.String(val)
+}
+
+// ProcessUserID returns an attribute KeyValue conforming to the
+// "process.user.id" semantic conventions. It represents the effective user ID
+// (EUID) of the process.
+func ProcessUserID(val int) attribute.KeyValue {
+ return ProcessUserIDKey.Int(val)
+}
+
+// ProcessUserName returns an attribute KeyValue conforming to the
+// "process.user.name" semantic conventions. It represents the username of the
+// effective user of the process.
+func ProcessUserName(val string) attribute.KeyValue {
+ return ProcessUserNameKey.String(val)
+}
+
+// ProcessVpid returns an attribute KeyValue conforming to the "process.vpid"
+// semantic conventions. It represents the virtual process identifier.
+func ProcessVpid(val int) attribute.KeyValue {
+ return ProcessVpidKey.Int(val)
+}
+
+// ProcessWorkingDirectory returns an attribute KeyValue conforming to the
+// "process.working_directory" semantic conventions. It represents the working
+// directory of the process.
+func ProcessWorkingDirectory(val string) attribute.KeyValue {
+ return ProcessWorkingDirectoryKey.String(val)
+}
+
+// Enum values for process.context_switch_type
+var (
+ // voluntary
+ // Stability: development
+ ProcessContextSwitchTypeVoluntary = ProcessContextSwitchTypeKey.String("voluntary")
+ // involuntary
+ // Stability: development
+ ProcessContextSwitchTypeInvoluntary = ProcessContextSwitchTypeKey.String("involuntary")
+)
+
+// Enum values for process.paging.fault_type
+var (
+ // major
+ // Stability: development
+ ProcessPagingFaultTypeMajor = ProcessPagingFaultTypeKey.String("major")
+ // minor
+ // Stability: development
+ ProcessPagingFaultTypeMinor = ProcessPagingFaultTypeKey.String("minor")
+)
+
+// Namespace: profile
+const (
+ // ProfileFrameTypeKey is the attribute Key conforming to the
+ // "profile.frame.type" semantic conventions. It represents the
+ // interpreter or compiler of a single frame.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "cpython"
+ ProfileFrameTypeKey = attribute.Key("profile.frame.type")
+)
+
+// Enum values for profile.frame.type
+var (
+ // [.NET]
+ //
+ // Stability: development
+ //
+ // [.NET]: https://wikipedia.org/wiki/.NET
+ ProfileFrameTypeDotnet = ProfileFrameTypeKey.String("dotnet")
+ // [JVM]
+ //
+ // Stability: development
+ //
+ // [JVM]: https://wikipedia.org/wiki/Java_virtual_machine
+ ProfileFrameTypeJVM = ProfileFrameTypeKey.String("jvm")
+ // [Kernel]
+ //
+ // Stability: development
+ //
+ // [Kernel]: https://wikipedia.org/wiki/Kernel_(operating_system)
+ ProfileFrameTypeKernel = ProfileFrameTypeKey.String("kernel")
+ // Can be one of but not limited to [C], [C++], [Go] or [Rust]. If possible, a
+ // more precise value MUST be used.
+ //
+ // Stability: development
+ //
+ // [C]: https://wikipedia.org/wiki/C_(programming_language)
+ // [C++]: https://wikipedia.org/wiki/C%2B%2B
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeNative = ProfileFrameTypeKey.String("native")
+ // [Perl]
+ //
+ // Stability: development
+ //
+ // [Perl]: https://wikipedia.org/wiki/Perl
+ ProfileFrameTypePerl = ProfileFrameTypeKey.String("perl")
+ // [PHP]
+ //
+ // Stability: development
+ //
+ // [PHP]: https://wikipedia.org/wiki/PHP
+ ProfileFrameTypePHP = ProfileFrameTypeKey.String("php")
+ // [Python]
+ //
+ // Stability: development
+ //
+ // [Python]: https://wikipedia.org/wiki/Python_(programming_language)
+ ProfileFrameTypeCpython = ProfileFrameTypeKey.String("cpython")
+ // [Ruby]
+ //
+ // Stability: development
+ //
+ // [Ruby]: https://wikipedia.org/wiki/Ruby_(programming_language)
+ ProfileFrameTypeRuby = ProfileFrameTypeKey.String("ruby")
+ // [V8JS]
+ //
+ // Stability: development
+ //
+ // [V8JS]: https://wikipedia.org/wiki/V8_(JavaScript_engine)
+ ProfileFrameTypeV8JS = ProfileFrameTypeKey.String("v8js")
+ // [Erlang]
+ //
+ // Stability: development
+ //
+ // [Erlang]: https://en.wikipedia.org/wiki/BEAM_(Erlang_virtual_machine)
+ ProfileFrameTypeBeam = ProfileFrameTypeKey.String("beam")
+ // [Go],
+ //
+ // Stability: development
+ //
+ // [Go]: https://wikipedia.org/wiki/Go_(programming_language)
+ ProfileFrameTypeGo = ProfileFrameTypeKey.String("go")
+ // [Rust]
+ //
+ // Stability: development
+ //
+ // [Rust]: https://wikipedia.org/wiki/Rust_(programming_language)
+ ProfileFrameTypeRust = ProfileFrameTypeKey.String("rust")
+)
+
+// Namespace: rpc
+const (
+ // RPCConnectRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.connect_rpc.error_code" semantic conventions. It represents the
+ // [error codes] of the Connect request. Error codes are always string values.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [error codes]: https://connectrpc.com//docs/protocol/#error-codes
+ RPCConnectRPCErrorCodeKey = attribute.Key("rpc.connect_rpc.error_code")
+
+ // RPCGRPCStatusCodeKey is the attribute Key conforming to the
+ // "rpc.grpc.status_code" semantic conventions. It represents the
+ // [numeric status code] of the gRPC request.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [numeric status code]: https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md
+ RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code")
+
+ // RPCJSONRPCErrorCodeKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+ // property of response if it is an error response.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: -32700, 100
+ RPCJSONRPCErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code")
+
+ // RPCJSONRPCErrorMessageKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.error_message" semantic conventions. It represents the
+ // `error.message` property of response if it is an error response.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Parse error", "User already exists"
+ RPCJSONRPCErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message")
+
+ // RPCJSONRPCRequestIDKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.request_id" semantic conventions. It represents the `id`
+ // property of request or response. Since protocol allows id to be int, string,
+ // `null` or missing (for notifications), value is expected to be cast to string
+ // for simplicity. Use empty string in case of `null` value. Omit entirely if
+ // this is a notification.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "10", "request-7", ""
+ RPCJSONRPCRequestIDKey = attribute.Key("rpc.jsonrpc.request_id")
+
+ // RPCJSONRPCVersionKey is the attribute Key conforming to the
+ // "rpc.jsonrpc.version" semantic conventions. It represents the protocol
+ // version as in `jsonrpc` property of request/response. Since JSON-RPC 1.0
+ // doesn't specify this, the value can be omitted.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2.0", "1.0"
+ RPCJSONRPCVersionKey = attribute.Key("rpc.jsonrpc.version")
+
+ // RPCMessageCompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.compressed_size" semantic conventions. It represents the
+ // compressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageCompressedSizeKey = attribute.Key("rpc.message.compressed_size")
+
+ // RPCMessageIDKey is the attribute Key conforming to the "rpc.message.id"
+ // semantic conventions. It MUST be calculated as two different counters
+ // starting from `1`, one for sent messages and one for received messages.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This way we guarantee that the values will be consistent between
+ // different implementations.
+ RPCMessageIDKey = attribute.Key("rpc.message.id")
+
+ // RPCMessageTypeKey is the attribute Key conforming to the "rpc.message.type"
+ // semantic conventions. It represents whether this is a received or sent
+ // message.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageTypeKey = attribute.Key("rpc.message.type")
+
+ // RPCMessageUncompressedSizeKey is the attribute Key conforming to the
+ // "rpc.message.uncompressed_size" semantic conventions. It represents the
+ // uncompressed size of the message in bytes.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCMessageUncompressedSizeKey = attribute.Key("rpc.message.uncompressed_size")
+
+ // RPCMethodKey is the attribute Key conforming to the "rpc.method" semantic
+ // conventions. It represents the name of the (logical) method being called,
+ // must be equal to the $method part in the span name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: exampleMethod
+ // Note: This is the logical name of the method from the RPC interface
+ // perspective, which can be different from the name of any implementing
+ // method/function. The `code.function.name` attribute may be used to store the
+ // latter (e.g., method actually executing the call on the server side, RPC
+ // client stub method on the client side).
+ RPCMethodKey = attribute.Key("rpc.method")
+
+ // RPCServiceKey is the attribute Key conforming to the "rpc.service" semantic
+ // conventions. It represents the full (logical) name of the service being
+ // called, including its package name, if applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: myservice.EchoService
+ // Note: This is the logical name of the service from the RPC interface
+ // perspective, which can be different from the name of any implementing class.
+ // The `code.namespace` attribute may be used to store the latter (despite the
+ // attribute name, it may include a class name; e.g., class with method actually
+ // executing the call on the server side, RPC client stub class on the client
+ // side).
+ RPCServiceKey = attribute.Key("rpc.service")
+
+ // RPCSystemKey is the attribute Key conforming to the "rpc.system" semantic
+ // conventions. It represents a string identifying the remoting system. See
+ // below for a list of well-known identifiers.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ RPCSystemKey = attribute.Key("rpc.system")
+)
+
+// RPCConnectRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.request.metadata" semantic conventions. It represents the
+// connect request metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.request.metadata."+key, val)
+}
+
+// RPCConnectRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.connect_rpc.response.metadata" semantic conventions. It represents the
+// connect response metadata, `<key>` being the normalized Connect Metadata key
+// (lowercase), the value being the metadata values.
+func RPCConnectRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.connect_rpc.response.metadata."+key, val)
+}
+
+// RPCGRPCRequestMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.request.metadata" semantic conventions. It represents the gRPC
+// request metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCRequestMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.request.metadata."+key, val)
+}
+
+// RPCGRPCResponseMetadata returns an attribute KeyValue conforming to the
+// "rpc.grpc.response.metadata" semantic conventions. It represents the gRPC
+// response metadata, `<key>` being the normalized gRPC Metadata key (lowercase),
+// the value being the metadata values.
+func RPCGRPCResponseMetadata(key string, val ...string) attribute.KeyValue {
+ return attribute.StringSlice("rpc.grpc.response.metadata."+key, val)
+}
+
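+// Usage sketch (illustrative only, not part of the generated conventions): the
+// metadata helpers above build string-slice attributes whose key embeds the
+// normalized (lowercase) metadata key. For example, a hypothetical call
+//
+//	kv := RPCGRPCRequestMetadata("x-request-id", "abc123")
+//
+// produces the attribute "rpc.grpc.request.metadata.x-request-id" holding the
+// slice of provided values.
+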
+// RPCJSONRPCErrorCode returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_code" semantic conventions. It represents the `error.code`
+// property of response if it is an error response.
+func RPCJSONRPCErrorCode(val int) attribute.KeyValue {
+ return RPCJSONRPCErrorCodeKey.Int(val)
+}
+
+// RPCJSONRPCErrorMessage returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.error_message" semantic conventions. It represents the
+// `error.message` property of response if it is an error response.
+func RPCJSONRPCErrorMessage(val string) attribute.KeyValue {
+ return RPCJSONRPCErrorMessageKey.String(val)
+}
+
+// RPCJSONRPCRequestID returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.request_id" semantic conventions. It represents the `id` property
+// of request or response. Since protocol allows id to be int, string, `null` or
+// missing (for notifications), value is expected to be cast to string for
+// simplicity. Use empty string in case of `null` value. Omit entirely if this is
+// a notification.
+func RPCJSONRPCRequestID(val string) attribute.KeyValue {
+ return RPCJSONRPCRequestIDKey.String(val)
+}
+
+// RPCJSONRPCVersion returns an attribute KeyValue conforming to the
+// "rpc.jsonrpc.version" semantic conventions. It represents the protocol version
+// as in `jsonrpc` property of request/response. Since JSON-RPC 1.0 doesn't
+// specify this, the value can be omitted.
+func RPCJSONRPCVersion(val string) attribute.KeyValue {
+ return RPCJSONRPCVersionKey.String(val)
+}
+
+// RPCMessageCompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.compressed_size" semantic conventions. It represents the
+// compressed size of the message in bytes.
+func RPCMessageCompressedSize(val int) attribute.KeyValue {
+ return RPCMessageCompressedSizeKey.Int(val)
+}
+
+// RPCMessageID returns an attribute KeyValue conforming to the "rpc.message.id"
+// semantic conventions. It MUST be calculated as two different counters starting
+// from `1`, one for sent messages and one for received messages.
+func RPCMessageID(val int) attribute.KeyValue {
+ return RPCMessageIDKey.Int(val)
+}
+
+// RPCMessageUncompressedSize returns an attribute KeyValue conforming to the
+// "rpc.message.uncompressed_size" semantic conventions. It represents the
+// uncompressed size of the message in bytes.
+func RPCMessageUncompressedSize(val int) attribute.KeyValue {
+ return RPCMessageUncompressedSizeKey.Int(val)
+}
+
+// RPCMethod returns an attribute KeyValue conforming to the "rpc.method"
+// semantic conventions. It represents the name of the (logical) method being
+// called, must be equal to the $method part in the span name.
+func RPCMethod(val string) attribute.KeyValue {
+ return RPCMethodKey.String(val)
+}
+
+// RPCService returns an attribute KeyValue conforming to the "rpc.service"
+// semantic conventions. It represents the full (logical) name of the service
+// being called, including its package name, if applicable.
+func RPCService(val string) attribute.KeyValue {
+ return RPCServiceKey.String(val)
+}
+
+// Enum values for rpc.connect_rpc.error_code
+var (
+ // cancelled
+ // Stability: development
+ RPCConnectRPCErrorCodeCancelled = RPCConnectRPCErrorCodeKey.String("cancelled")
+ // unknown
+ // Stability: development
+ RPCConnectRPCErrorCodeUnknown = RPCConnectRPCErrorCodeKey.String("unknown")
+ // invalid_argument
+ // Stability: development
+ RPCConnectRPCErrorCodeInvalidArgument = RPCConnectRPCErrorCodeKey.String("invalid_argument")
+ // deadline_exceeded
+ // Stability: development
+ RPCConnectRPCErrorCodeDeadlineExceeded = RPCConnectRPCErrorCodeKey.String("deadline_exceeded")
+ // not_found
+ // Stability: development
+ RPCConnectRPCErrorCodeNotFound = RPCConnectRPCErrorCodeKey.String("not_found")
+ // already_exists
+ // Stability: development
+ RPCConnectRPCErrorCodeAlreadyExists = RPCConnectRPCErrorCodeKey.String("already_exists")
+ // permission_denied
+ // Stability: development
+ RPCConnectRPCErrorCodePermissionDenied = RPCConnectRPCErrorCodeKey.String("permission_denied")
+ // resource_exhausted
+ // Stability: development
+ RPCConnectRPCErrorCodeResourceExhausted = RPCConnectRPCErrorCodeKey.String("resource_exhausted")
+ // failed_precondition
+ // Stability: development
+ RPCConnectRPCErrorCodeFailedPrecondition = RPCConnectRPCErrorCodeKey.String("failed_precondition")
+ // aborted
+ // Stability: development
+ RPCConnectRPCErrorCodeAborted = RPCConnectRPCErrorCodeKey.String("aborted")
+ // out_of_range
+ // Stability: development
+ RPCConnectRPCErrorCodeOutOfRange = RPCConnectRPCErrorCodeKey.String("out_of_range")
+ // unimplemented
+ // Stability: development
+ RPCConnectRPCErrorCodeUnimplemented = RPCConnectRPCErrorCodeKey.String("unimplemented")
+ // internal
+ // Stability: development
+ RPCConnectRPCErrorCodeInternal = RPCConnectRPCErrorCodeKey.String("internal")
+ // unavailable
+ // Stability: development
+ RPCConnectRPCErrorCodeUnavailable = RPCConnectRPCErrorCodeKey.String("unavailable")
+ // data_loss
+ // Stability: development
+ RPCConnectRPCErrorCodeDataLoss = RPCConnectRPCErrorCodeKey.String("data_loss")
+ // unauthenticated
+ // Stability: development
+ RPCConnectRPCErrorCodeUnauthenticated = RPCConnectRPCErrorCodeKey.String("unauthenticated")
+)
+
+// Enum values for rpc.grpc.status_code
+var (
+ // OK
+ // Stability: development
+ RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0)
+ // CANCELLED
+ // Stability: development
+ RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1)
+ // UNKNOWN
+ // Stability: development
+ RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2)
+ // INVALID_ARGUMENT
+ // Stability: development
+ RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3)
+ // DEADLINE_EXCEEDED
+ // Stability: development
+ RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4)
+ // NOT_FOUND
+ // Stability: development
+ RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5)
+ // ALREADY_EXISTS
+ // Stability: development
+ RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6)
+ // PERMISSION_DENIED
+ // Stability: development
+ RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7)
+ // RESOURCE_EXHAUSTED
+ // Stability: development
+ RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8)
+ // FAILED_PRECONDITION
+ // Stability: development
+ RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9)
+ // ABORTED
+ // Stability: development
+ RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10)
+ // OUT_OF_RANGE
+ // Stability: development
+ RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11)
+ // UNIMPLEMENTED
+ // Stability: development
+ RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12)
+ // INTERNAL
+ // Stability: development
+ RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13)
+ // UNAVAILABLE
+ // Stability: development
+ RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14)
+ // DATA_LOSS
+ // Stability: development
+ RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15)
+ // UNAUTHENTICATED
+ // Stability: development
+ RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16)
+)
+
+// Enum values for rpc.message.type
+var (
+ // sent
+ // Stability: development
+ RPCMessageTypeSent = RPCMessageTypeKey.String("SENT")
+ // received
+ // Stability: development
+ RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED")
+)
+
+// Enum values for rpc.system
+var (
+ // gRPC
+ // Stability: development
+ RPCSystemGRPC = RPCSystemKey.String("grpc")
+ // Java RMI
+ // Stability: development
+ RPCSystemJavaRmi = RPCSystemKey.String("java_rmi")
+ // .NET WCF
+ // Stability: development
+ RPCSystemDotnetWcf = RPCSystemKey.String("dotnet_wcf")
+ // Apache Dubbo
+ // Stability: development
+ RPCSystemApacheDubbo = RPCSystemKey.String("apache_dubbo")
+ // Connect RPC
+ // Stability: development
+ RPCSystemConnectRPC = RPCSystemKey.String("connect_rpc")
+)
+
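+// Usage sketch (illustrative only, not part of the generated conventions): a
+// typical gRPC client or server span might combine several of the rpc.*
+// attributes defined above, for example:
+//
+//	attrs := []attribute.KeyValue{
+//		RPCSystemGRPC,
+//		RPCService("myservice.EchoService"),
+//		RPCMethod("exampleMethod"),
+//		RPCGRPCStatusCodeOk,
+//	}
+//
+// The service and method values are hypothetical and mirror the examples given
+// in the attribute documentation.
+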
+// Namespace: security_rule
+const (
+ // SecurityRuleCategoryKey is the attribute Key conforming to the
+ // "security_rule.category" semantic conventions. It represents a categorization
+ // value keyword used by the entity using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Attempted Information Leak"
+ SecurityRuleCategoryKey = attribute.Key("security_rule.category")
+
+ // SecurityRuleDescriptionKey is the attribute Key conforming to the
+ // "security_rule.description" semantic conventions. It represents the
+ // description of the rule generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Block requests to public DNS over HTTPS / TLS protocols"
+ SecurityRuleDescriptionKey = attribute.Key("security_rule.description")
+
+ // SecurityRuleLicenseKey is the attribute Key conforming to the
+ // "security_rule.license" semantic conventions. It represents the name of the
+ // license under which the rule used to generate this event is made available.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Apache 2.0"
+ SecurityRuleLicenseKey = attribute.Key("security_rule.license")
+
+ // SecurityRuleNameKey is the attribute Key conforming to the
+ // "security_rule.name" semantic conventions. It represents the name of the rule
+ // or signature generating the event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "BLOCK_DNS_over_TLS"
+ SecurityRuleNameKey = attribute.Key("security_rule.name")
+
+ // SecurityRuleReferenceKey is the attribute Key conforming to the
+ // "security_rule.reference" semantic conventions. It represents the reference
+ // URL to additional information about the rule used to generate this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://en.wikipedia.org/wiki/DNS_over_TLS"
+ // Note: The URL can point to the vendor’s documentation about the rule. If
+ // that’s not available, it can also be a link to a more general page
+ // describing this type of alert.
+ SecurityRuleReferenceKey = attribute.Key("security_rule.reference")
+
+ // SecurityRuleRulesetNameKey is the attribute Key conforming to the
+ // "security_rule.ruleset.name" semantic conventions. It represents the name of
+ // the ruleset, policy, group, or parent category in which the rule used to
+ // generate this event is a member.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Standard_Protocol_Filters"
+ SecurityRuleRulesetNameKey = attribute.Key("security_rule.ruleset.name")
+
+ // SecurityRuleUUIDKey is the attribute Key conforming to the
+ // "security_rule.uuid" semantic conventions. It represents a rule ID that is
+ // unique within the scope of a set or group of agents, observers, or other
+ // entities using the rule for detection of this event.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "550e8400-e29b-41d4-a716-446655440000", "1100110011"
+ SecurityRuleUUIDKey = attribute.Key("security_rule.uuid")
+
+ // SecurityRuleVersionKey is the attribute Key conforming to the
+ // "security_rule.version" semantic conventions. It represents the version /
+ // revision of the rule being used for analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.0.0"
+ SecurityRuleVersionKey = attribute.Key("security_rule.version")
+)
+
+// SecurityRuleCategory returns an attribute KeyValue conforming to the
+// "security_rule.category" semantic conventions. It represents a categorization
+// value keyword used by the entity using the rule for detection of this event.
+func SecurityRuleCategory(val string) attribute.KeyValue {
+ return SecurityRuleCategoryKey.String(val)
+}
+
+// SecurityRuleDescription returns an attribute KeyValue conforming to the
+// "security_rule.description" semantic conventions. It represents the
+// description of the rule generating the event.
+func SecurityRuleDescription(val string) attribute.KeyValue {
+ return SecurityRuleDescriptionKey.String(val)
+}
+
+// SecurityRuleLicense returns an attribute KeyValue conforming to the
+// "security_rule.license" semantic conventions. It represents the name of the
+// license under which the rule used to generate this event is made available.
+func SecurityRuleLicense(val string) attribute.KeyValue {
+ return SecurityRuleLicenseKey.String(val)
+}
+
+// SecurityRuleName returns an attribute KeyValue conforming to the
+// "security_rule.name" semantic conventions. It represents the name of the rule
+// or signature generating the event.
+func SecurityRuleName(val string) attribute.KeyValue {
+ return SecurityRuleNameKey.String(val)
+}
+
+// SecurityRuleReference returns an attribute KeyValue conforming to the
+// "security_rule.reference" semantic conventions. It represents the reference
+// URL to additional information about the rule used to generate this event.
+func SecurityRuleReference(val string) attribute.KeyValue {
+ return SecurityRuleReferenceKey.String(val)
+}
+
+// SecurityRuleRulesetName returns an attribute KeyValue conforming to the
+// "security_rule.ruleset.name" semantic conventions. It represents the name of
+// the ruleset, policy, group, or parent category in which the rule used to
+// generate this event is a member.
+func SecurityRuleRulesetName(val string) attribute.KeyValue {
+ return SecurityRuleRulesetNameKey.String(val)
+}
+
+// SecurityRuleUUID returns an attribute KeyValue conforming to the
+// "security_rule.uuid" semantic conventions. It represents a rule ID that is
+// unique within the scope of a set or group of agents, observers, or other
+// entities using the rule for detection of this event.
+func SecurityRuleUUID(val string) attribute.KeyValue {
+ return SecurityRuleUUIDKey.String(val)
+}
+
+// SecurityRuleVersion returns an attribute KeyValue conforming to the
+// "security_rule.version" semantic conventions. It represents the version /
+// revision of the rule being used for analysis.
+func SecurityRuleVersion(val string) attribute.KeyValue {
+ return SecurityRuleVersionKey.String(val)
+}
+
+// Namespace: server
+const (
+ // ServerAddressKey is the attribute Key conforming to the "server.address"
+ // semantic conventions. It represents the server domain name if available
+ // without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.address` SHOULD represent the server address behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerAddressKey = attribute.Key("server.address")
+
+ // ServerPortKey is the attribute Key conforming to the "server.port" semantic
+ // conventions. It represents the server port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: 80, 8080, 443
+ // Note: When observed from the client side, and when communicating through an
+ // intermediary, `server.port` SHOULD represent the server port behind any
+ // intermediaries, for example proxies, if it's available.
+ ServerPortKey = attribute.Key("server.port")
+)
+
+// ServerAddress returns an attribute KeyValue conforming to the "server.address"
+// semantic conventions. It represents the server domain name if available
+// without reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func ServerAddress(val string) attribute.KeyValue {
+ return ServerAddressKey.String(val)
+}
+
+// ServerPort returns an attribute KeyValue conforming to the "server.port"
+// semantic conventions. It represents the server port number.
+func ServerPort(val int) attribute.KeyValue {
+ return ServerPortKey.Int(val)
+}
+
+// Namespace: service
+const (
+ // ServiceInstanceIDKey is the attribute Key conforming to the
+ // "service.instance.id" semantic conventions. It represents the string ID of
+ // the service instance.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "627cc493-f310-47de-96bd-71410b7dec09"
+ // Note: MUST be unique for each instance of the same
+ // `service.namespace,service.name` pair (in other words
+ // `service.namespace,service.name,service.instance.id` triplet MUST be globally
+ // unique). The ID helps to
+ // distinguish instances of the same service that exist at the same time (e.g.
+ // instances of a horizontally scaled
+ // service).
+ //
+ // Implementations, such as SDKs, are recommended to generate a random Version 1
+ // or Version 4 [RFC
+ // 4122] UUID, but are free to use an inherent unique ID as
+ // the source of
+ // this value if stability is desirable. In that case, the ID SHOULD be used as
+ // source of a UUID Version 5 and
+ // SHOULD use the following UUID as the namespace:
+ // `4d63009a-8d0f-11ee-aad7-4c796ed8e320`.
+ //
+ // UUIDs are typically recommended, as only an opaque value for the purposes of
+ // identifying a service instance is
+ // needed. Similar to what can be seen in the man page for the
+ // [`/etc/machine-id`] file, the underlying
+ // data, such as pod name and namespace should be treated as confidential, being
+ // the user's choice to expose it
+ // or not via another resource attribute.
+ //
+ // For applications running behind an application server (like unicorn), we do
+ // not recommend using one identifier
+ // for all processes participating in the application. Instead, it's recommended
+ // that each division (e.g. a worker
+ // thread in unicorn) have its own instance.id.
+ //
+ // It's not recommended for a Collector to set `service.instance.id` if it can't
+ // unambiguously determine the
+ // service instance that is generating that telemetry. For instance, creating a
+ // UUID based on `pod.name` will
+ // likely be wrong, as the Collector might not know from which container within
+ // that pod the telemetry originated.
+ // However, Collectors can set the `service.instance.id` if they can
+ // unambiguously determine the service instance
+ // for that telemetry. This is typically the case for scraping receivers, as
+ // they know the target address and
+ // port.
+ //
+ // [RFC
+ // 4122]: https://www.ietf.org/rfc/rfc4122.txt
+ // [`/etc/machine-id`]: https://www.freedesktop.org/software/systemd/man/latest/machine-id.html
+ ServiceInstanceIDKey = attribute.Key("service.instance.id")
+
+ // ServiceNameKey is the attribute Key conforming to the "service.name" semantic
+ // conventions. It represents the logical name of the service.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "shoppingcart"
+ // Note: MUST be the same for all instances of horizontally scaled services. If
+ // the value was not specified, SDKs MUST fallback to `unknown_service:`
+ // concatenated with [`process.executable.name`], e.g. `unknown_service:bash`.
+ // If `process.executable.name` is not available, the value MUST be set to
+ // `unknown_service`.
+ //
+ // [`process.executable.name`]: process.md
+ ServiceNameKey = attribute.Key("service.name")
+
+ // ServiceNamespaceKey is the attribute Key conforming to the
+ // "service.namespace" semantic conventions. It represents a namespace for
+ // `service.name`.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Shop"
+ // Note: A string value having a meaning that helps to distinguish a group of
+ // services, for example the team name that owns a group of services.
+ // `service.name` is expected to be unique within the same namespace. If
+ // `service.namespace` is not specified in the Resource then `service.name` is
+ // expected to be unique for all services that have no explicit namespace
+ // defined (so the empty/unspecified namespace is simply one more valid
+ // namespace). Zero-length namespace string is assumed equal to unspecified
+ // namespace.
+ ServiceNamespaceKey = attribute.Key("service.namespace")
+
+ // ServiceVersionKey is the attribute Key conforming to the "service.version"
+ // semantic conventions. It represents the version string of the service API or
+ // implementation. The format is not defined by these conventions.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "2.0.0", "a01dbef8a"
+ ServiceVersionKey = attribute.Key("service.version")
+)
+
+// ServiceInstanceID returns an attribute KeyValue conforming to the
+// "service.instance.id" semantic conventions. It represents the string ID of the
+// service instance.
+func ServiceInstanceID(val string) attribute.KeyValue {
+ return ServiceInstanceIDKey.String(val)
+}
+
+// ServiceName returns an attribute KeyValue conforming to the "service.name"
+// semantic conventions. It represents the logical name of the service.
+func ServiceName(val string) attribute.KeyValue {
+ return ServiceNameKey.String(val)
+}
+
+// ServiceNamespace returns an attribute KeyValue conforming to the
+// "service.namespace" semantic conventions. It represents a namespace for
+// `service.name`.
+func ServiceNamespace(val string) attribute.KeyValue {
+ return ServiceNamespaceKey.String(val)
+}
+
+// ServiceVersion returns an attribute KeyValue conforming to the
+// "service.version" semantic conventions. It represents the version string of
+// the service API or implementation. The format is not defined by these
+// conventions.
+func ServiceVersion(val string) attribute.KeyValue {
+ return ServiceVersionKey.String(val)
+}
+
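+// Usage sketch (illustrative only, not part of the generated conventions): the
+// service.* attributes are typically set together on a resource to identify a
+// service instance. The values below are hypothetical and taken from the
+// attribute documentation examples:
+//
+//	attrs := []attribute.KeyValue{
+//		ServiceName("shoppingcart"),
+//		ServiceNamespace("Shop"),
+//		ServiceInstanceID("627cc493-f310-47de-96bd-71410b7dec09"),
+//		ServiceVersion("2.0.0"),
+//	}
+//
+// Per the note on service.name, SDKs that cannot determine a name fall back to
+// "unknown_service:" concatenated with process.executable.name.
+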
+// Namespace: session
+const (
+ // SessionIDKey is the attribute Key conforming to the "session.id" semantic
+ // conventions. It represents a unique id to identify a session.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionIDKey = attribute.Key("session.id")
+
+ // SessionPreviousIDKey is the attribute Key conforming to the
+ // "session.previous_id" semantic conventions. It represents the previous
+ // `session.id` for this user, when known.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 00112233-4455-6677-8899-aabbccddeeff
+ SessionPreviousIDKey = attribute.Key("session.previous_id")
+)
+
+// SessionID returns an attribute KeyValue conforming to the "session.id"
+// semantic conventions. It represents a unique id to identify a session.
+func SessionID(val string) attribute.KeyValue {
+ return SessionIDKey.String(val)
+}
+
+// SessionPreviousID returns an attribute KeyValue conforming to the
+// "session.previous_id" semantic conventions. It represents the previous
+// `session.id` for this user, when known.
+func SessionPreviousID(val string) attribute.KeyValue {
+ return SessionPreviousIDKey.String(val)
+}
+
+// Namespace: signalr
+const (
+ // SignalRConnectionStatusKey is the attribute Key conforming to the
+ // "signalr.connection.status" semantic conventions. It represents the SignalR
+ // HTTP connection closure status.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "app_shutdown", "timeout"
+ SignalRConnectionStatusKey = attribute.Key("signalr.connection.status")
+
+ // SignalRTransportKey is the attribute Key conforming to the
+ // "signalr.transport" semantic conventions. It represents the
+ // [SignalR transport type].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "web_sockets", "long_polling"
+ //
+ // [SignalR transport type]: https://github.com/dotnet/aspnetcore/blob/main/src/SignalR/docs/specs/TransportProtocols.md
+ SignalRTransportKey = attribute.Key("signalr.transport")
+)
+
+// Enum values for signalr.connection.status
+var (
+ // The connection was closed normally.
+ // Stability: stable
+ SignalRConnectionStatusNormalClosure = SignalRConnectionStatusKey.String("normal_closure")
+ // The connection was closed due to a timeout.
+ // Stability: stable
+ SignalRConnectionStatusTimeout = SignalRConnectionStatusKey.String("timeout")
+ // The connection was closed because the app is shutting down.
+ // Stability: stable
+ SignalRConnectionStatusAppShutdown = SignalRConnectionStatusKey.String("app_shutdown")
+)
+
+// Enum values for signalr.transport
+var (
+ // ServerSentEvents protocol
+ // Stability: stable
+ SignalRTransportServerSentEvents = SignalRTransportKey.String("server_sent_events")
+ // LongPolling protocol
+ // Stability: stable
+ SignalRTransportLongPolling = SignalRTransportKey.String("long_polling")
+ // WebSockets protocol
+ // Stability: stable
+ SignalRTransportWebSockets = SignalRTransportKey.String("web_sockets")
+)
+
+// Namespace: source
+const (
+ // SourceAddressKey is the attribute Key conforming to the "source.address"
+ // semantic conventions. It represents the source address - domain name if
+ // available without reverse DNS lookup; otherwise, IP address or Unix domain
+ // socket name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "source.example.com", "10.1.2.80", "/tmp/my.sock"
+ // Note: When observed from the destination side, and when communicating through
+ // an intermediary, `source.address` SHOULD represent the source address behind
+ // any intermediaries, for example proxies, if it's available.
+ SourceAddressKey = attribute.Key("source.address")
+
+ // SourcePortKey is the attribute Key conforming to the "source.port" semantic
+ // conventions. It represents the source port number.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 3389, 2888
+ SourcePortKey = attribute.Key("source.port")
+)
+
+// SourceAddress returns an attribute KeyValue conforming to the "source.address"
+// semantic conventions. It represents the source address - domain name if
+// available without reverse DNS lookup; otherwise, IP address or Unix domain
+// socket name.
+func SourceAddress(val string) attribute.KeyValue {
+ return SourceAddressKey.String(val)
+}
+
+// SourcePort returns an attribute KeyValue conforming to the "source.port"
+// semantic conventions. It represents the source port number.
+func SourcePort(val int) attribute.KeyValue {
+ return SourcePortKey.Int(val)
+}
+
+// Namespace: system
+const (
+ // SystemCPULogicalNumberKey is the attribute Key conforming to the
+ // "system.cpu.logical_number" semantic conventions. It is deprecated; use
+ // `cpu.logical_number` instead.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 1
+ SystemCPULogicalNumberKey = attribute.Key("system.cpu.logical_number")
+
+ // SystemDeviceKey is the attribute Key conforming to the "system.device"
+ // semantic conventions. It represents the device identifier.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "(identifier)"
+ SystemDeviceKey = attribute.Key("system.device")
+
+ // SystemFilesystemModeKey is the attribute Key conforming to the
+ // "system.filesystem.mode" semantic conventions. It represents the filesystem
+ // mode.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "rw, ro"
+ SystemFilesystemModeKey = attribute.Key("system.filesystem.mode")
+
+ // SystemFilesystemMountpointKey is the attribute Key conforming to the
+ // "system.filesystem.mountpoint" semantic conventions. It represents the
+ // filesystem mount path.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/mnt/data"
+ SystemFilesystemMountpointKey = attribute.Key("system.filesystem.mountpoint")
+
+ // SystemFilesystemStateKey is the attribute Key conforming to the
+ // "system.filesystem.state" semantic conventions. It represents the filesystem
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "used"
+ SystemFilesystemStateKey = attribute.Key("system.filesystem.state")
+
+ // SystemFilesystemTypeKey is the attribute Key conforming to the
+ // "system.filesystem.type" semantic conventions. It represents the filesystem
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ext4"
+ SystemFilesystemTypeKey = attribute.Key("system.filesystem.type")
+
+ // SystemMemoryStateKey is the attribute Key conforming to the
+ // "system.memory.state" semantic conventions. It represents the memory state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free", "cached"
+ SystemMemoryStateKey = attribute.Key("system.memory.state")
+
+ // SystemPagingDirectionKey is the attribute Key conforming to the
+ // "system.paging.direction" semantic conventions. It represents the paging
+ // access direction.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "in"
+ SystemPagingDirectionKey = attribute.Key("system.paging.direction")
+
+ // SystemPagingStateKey is the attribute Key conforming to the
+ // "system.paging.state" semantic conventions. It represents the memory paging
+ // state.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "free"
+ SystemPagingStateKey = attribute.Key("system.paging.state")
+
+ // SystemPagingTypeKey is the attribute Key conforming to the
+ // "system.paging.type" semantic conventions. It represents the memory paging
+ // type.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "minor"
+ SystemPagingTypeKey = attribute.Key("system.paging.type")
+
+ // SystemProcessStatusKey is the attribute Key conforming to the
+ // "system.process.status" semantic conventions. It represents the process
+ // state, e.g., [Linux Process State Codes].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "running"
+ //
+ // [Linux Process State Codes]: https://man7.org/linux/man-pages/man1/ps.1.html#PROCESS_STATE_CODES
+ SystemProcessStatusKey = attribute.Key("system.process.status")
+)
+
+// SystemCPULogicalNumber returns an attribute KeyValue conforming to the
+// "system.cpu.logical_number" semantic conventions. It represents the
+// deprecated, use `cpu.logical_number` instead.
+func SystemCPULogicalNumber(val int) attribute.KeyValue {
+ return SystemCPULogicalNumberKey.Int(val)
+}
+
+// SystemDevice returns an attribute KeyValue conforming to the "system.device"
+// semantic conventions. It represents the device identifier.
+func SystemDevice(val string) attribute.KeyValue {
+ return SystemDeviceKey.String(val)
+}
+
+// SystemFilesystemMode returns an attribute KeyValue conforming to the
+// "system.filesystem.mode" semantic conventions. It represents the filesystem
+// mode.
+func SystemFilesystemMode(val string) attribute.KeyValue {
+ return SystemFilesystemModeKey.String(val)
+}
+
+// SystemFilesystemMountpoint returns an attribute KeyValue conforming to the
+// "system.filesystem.mountpoint" semantic conventions. It represents the
+// filesystem mount path.
+func SystemFilesystemMountpoint(val string) attribute.KeyValue {
+ return SystemFilesystemMountpointKey.String(val)
+}
+
+// Enum values for system.filesystem.state
+var (
+ // used
+ // Stability: development
+ SystemFilesystemStateUsed = SystemFilesystemStateKey.String("used")
+ // free
+ // Stability: development
+ SystemFilesystemStateFree = SystemFilesystemStateKey.String("free")
+ // reserved
+ // Stability: development
+ SystemFilesystemStateReserved = SystemFilesystemStateKey.String("reserved")
+)
+
+// Enum values for system.filesystem.type
+var (
+ // fat32
+ // Stability: development
+ SystemFilesystemTypeFat32 = SystemFilesystemTypeKey.String("fat32")
+ // exfat
+ // Stability: development
+ SystemFilesystemTypeExfat = SystemFilesystemTypeKey.String("exfat")
+ // ntfs
+ // Stability: development
+ SystemFilesystemTypeNtfs = SystemFilesystemTypeKey.String("ntfs")
+ // refs
+ // Stability: development
+ SystemFilesystemTypeRefs = SystemFilesystemTypeKey.String("refs")
+ // hfsplus
+ // Stability: development
+ SystemFilesystemTypeHfsplus = SystemFilesystemTypeKey.String("hfsplus")
+ // ext4
+ // Stability: development
+ SystemFilesystemTypeExt4 = SystemFilesystemTypeKey.String("ext4")
+)
+
+// Enum values for system.memory.state
+var (
+ // Actual used virtual memory in bytes.
+ // Stability: development
+ SystemMemoryStateUsed = SystemMemoryStateKey.String("used")
+ // free
+ // Stability: development
+ SystemMemoryStateFree = SystemMemoryStateKey.String("free")
+ // buffers
+ // Stability: development
+ SystemMemoryStateBuffers = SystemMemoryStateKey.String("buffers")
+ // cached
+ // Stability: development
+ SystemMemoryStateCached = SystemMemoryStateKey.String("cached")
+)
+
+// Enum values for system.paging.direction
+var (
+ // in
+ // Stability: development
+ SystemPagingDirectionIn = SystemPagingDirectionKey.String("in")
+ // out
+ // Stability: development
+ SystemPagingDirectionOut = SystemPagingDirectionKey.String("out")
+)
+
+// Enum values for system.paging.state
+var (
+ // used
+ // Stability: development
+ SystemPagingStateUsed = SystemPagingStateKey.String("used")
+ // free
+ // Stability: development
+ SystemPagingStateFree = SystemPagingStateKey.String("free")
+)
+
+// Enum values for system.paging.type
+var (
+ // major
+ // Stability: development
+ SystemPagingTypeMajor = SystemPagingTypeKey.String("major")
+ // minor
+ // Stability: development
+ SystemPagingTypeMinor = SystemPagingTypeKey.String("minor")
+)
+
+// Enum values for system.process.status
+var (
+ // running
+ // Stability: development
+ SystemProcessStatusRunning = SystemProcessStatusKey.String("running")
+ // sleeping
+ // Stability: development
+ SystemProcessStatusSleeping = SystemProcessStatusKey.String("sleeping")
+ // stopped
+ // Stability: development
+ SystemProcessStatusStopped = SystemProcessStatusKey.String("stopped")
+ // defunct
+ // Stability: development
+ SystemProcessStatusDefunct = SystemProcessStatusKey.String("defunct")
+)
+
+// Namespace: telemetry
+const (
+ // TelemetryDistroNameKey is the attribute Key conforming to the
+ // "telemetry.distro.name" semantic conventions. It represents the name of the
+ // auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "parts-unlimited-java"
+ // Note: Official auto instrumentation agents and distributions SHOULD set the
+ // `telemetry.distro.name` attribute to
+ // a string starting with `opentelemetry-`, e.g.
+ // `opentelemetry-java-instrumentation`.
+ TelemetryDistroNameKey = attribute.Key("telemetry.distro.name")
+
+ // TelemetryDistroVersionKey is the attribute Key conforming to the
+ // "telemetry.distro.version" semantic conventions. It represents the version
+ // string of the auto instrumentation agent or distribution, if used.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2.3"
+ TelemetryDistroVersionKey = attribute.Key("telemetry.distro.version")
+
+ // TelemetrySDKLanguageKey is the attribute Key conforming to the
+ // "telemetry.sdk.language" semantic conventions. It represents the language of
+ // the telemetry SDK.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples:
+ TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language")
+
+ // TelemetrySDKNameKey is the attribute Key conforming to the
+ // "telemetry.sdk.name" semantic conventions. It represents the name of the
+ // telemetry SDK as defined above.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "opentelemetry"
+ // Note: The OpenTelemetry SDK MUST set the `telemetry.sdk.name` attribute to
+ // `opentelemetry`.
+ // If another SDK, like a fork or a vendor-provided implementation, is used,
+ // this SDK MUST set the
+ // `telemetry.sdk.name` attribute to the fully-qualified class or module name of
+ // this SDK's main entry point
+ // or another suitable identifier depending on the language.
+ // The identifier `opentelemetry` is reserved and MUST NOT be used in this case.
+ // All custom identifiers SHOULD be stable across different versions of an
+ // implementation.
+ TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name")
+
+ // TelemetrySDKVersionKey is the attribute Key conforming to the
+ // "telemetry.sdk.version" semantic conventions. It represents the version
+ // string of the telemetry SDK.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "1.2.3"
+ TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version")
+)
+
+// TelemetryDistroName returns an attribute KeyValue conforming to the
+// "telemetry.distro.name" semantic conventions. It represents the name of the
+// auto instrumentation agent or distribution, if used.
+func TelemetryDistroName(val string) attribute.KeyValue {
+ return TelemetryDistroNameKey.String(val)
+}
+
+// TelemetryDistroVersion returns an attribute KeyValue conforming to the
+// "telemetry.distro.version" semantic conventions. It represents the version
+// string of the auto instrumentation agent or distribution, if used.
+func TelemetryDistroVersion(val string) attribute.KeyValue {
+ return TelemetryDistroVersionKey.String(val)
+}
+
+// TelemetrySDKName returns an attribute KeyValue conforming to the
+// "telemetry.sdk.name" semantic conventions. It represents the name of the
+// telemetry SDK as defined above.
+func TelemetrySDKName(val string) attribute.KeyValue {
+ return TelemetrySDKNameKey.String(val)
+}
+
+// TelemetrySDKVersion returns an attribute KeyValue conforming to the
+// "telemetry.sdk.version" semantic conventions. It represents the version string
+// of the telemetry SDK.
+func TelemetrySDKVersion(val string) attribute.KeyValue {
+ return TelemetrySDKVersionKey.String(val)
+}
+
+// Enum values for telemetry.sdk.language
+var (
+ // cpp
+ // Stability: stable
+ TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp")
+ // dotnet
+ // Stability: stable
+ TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet")
+ // erlang
+ // Stability: stable
+ TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang")
+ // go
+ // Stability: stable
+ TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go")
+ // java
+ // Stability: stable
+ TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java")
+ // nodejs
+ // Stability: stable
+ TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs")
+ // php
+ // Stability: stable
+ TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php")
+ // python
+ // Stability: stable
+ TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python")
+ // ruby
+ // Stability: stable
+ TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby")
+ // rust
+ // Stability: stable
+ TelemetrySDKLanguageRust = TelemetrySDKLanguageKey.String("rust")
+ // swift
+ // Stability: stable
+ TelemetrySDKLanguageSwift = TelemetrySDKLanguageKey.String("swift")
+ // webjs
+ // Stability: stable
+ TelemetrySDKLanguageWebJS = TelemetrySDKLanguageKey.String("webjs")
+)
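+
+// exampleTelemetrySDKAttributes is a minimal, illustrative sketch, not part of
+// the generated semantic conventions, showing how the telemetry constructors
+// and enum values defined above could be combined into one attribute set; the
+// literal version strings are hypothetical.
+func exampleTelemetrySDKAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		// SDK identification, per the notes on telemetry.sdk.* above.
+		TelemetrySDKName("opentelemetry"),
+		TelemetrySDKLanguageGo,
+		TelemetrySDKVersion("1.2.3"),
+		// Optional distro identification when an auto-instrumentation
+		// distribution is in use; names should start with "opentelemetry-".
+		TelemetryDistroName("opentelemetry-go-instrumentation"),
+		TelemetryDistroVersion("1.2.3"),
+	}
+}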
+
+// Namespace: test
+const (
+ // TestCaseNameKey is the attribute Key conforming to the "test.case.name"
+ // semantic conventions. It represents the fully qualified human readable name
+ // of the [test case].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "org.example.TestCase1.test1", "example/tests/TestCase1.test1",
+ // "ExampleTestCase1_test1"
+ //
+ // [test case]: https://wikipedia.org/wiki/Test_case
+ TestCaseNameKey = attribute.Key("test.case.name")
+
+ // TestCaseResultStatusKey is the attribute Key conforming to the
+ // "test.case.result.status" semantic conventions. It represents the status of
+ // the actual test case result from test execution.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "pass", "fail"
+ TestCaseResultStatusKey = attribute.Key("test.case.result.status")
+
+ // TestSuiteNameKey is the attribute Key conforming to the "test.suite.name"
+ // semantic conventions. It represents the human readable name of a
+ // [test suite].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TestSuite1"
+ //
+ // [test suite]: https://wikipedia.org/wiki/Test_suite
+ TestSuiteNameKey = attribute.Key("test.suite.name")
+
+ // TestSuiteRunStatusKey is the attribute Key conforming to the
+ // "test.suite.run.status" semantic conventions. It represents the status of the
+ // test suite run.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "success", "failure", "skipped", "aborted", "timed_out",
+ // "in_progress"
+ TestSuiteRunStatusKey = attribute.Key("test.suite.run.status")
+)
+
+// TestCaseName returns an attribute KeyValue conforming to the "test.case.name"
+// semantic conventions. It represents the fully qualified human readable name of
+// the [test case].
+//
+// [test case]: https://wikipedia.org/wiki/Test_case
+func TestCaseName(val string) attribute.KeyValue {
+ return TestCaseNameKey.String(val)
+}
+
+// TestSuiteName returns an attribute KeyValue conforming to the
+// "test.suite.name" semantic conventions. It represents the human readable name
+// of a [test suite].
+//
+// [test suite]: https://wikipedia.org/wiki/Test_suite
+func TestSuiteName(val string) attribute.KeyValue {
+ return TestSuiteNameKey.String(val)
+}
+
+// Enum values for test.case.result.status
+var (
+ // pass
+ // Stability: development
+ TestCaseResultStatusPass = TestCaseResultStatusKey.String("pass")
+ // fail
+ // Stability: development
+ TestCaseResultStatusFail = TestCaseResultStatusKey.String("fail")
+)
+
+// Enum values for test.suite.run.status
+var (
+ // success
+ // Stability: development
+ TestSuiteRunStatusSuccess = TestSuiteRunStatusKey.String("success")
+ // failure
+ // Stability: development
+ TestSuiteRunStatusFailure = TestSuiteRunStatusKey.String("failure")
+ // skipped
+ // Stability: development
+ TestSuiteRunStatusSkipped = TestSuiteRunStatusKey.String("skipped")
+ // aborted
+ // Stability: development
+ TestSuiteRunStatusAborted = TestSuiteRunStatusKey.String("aborted")
+ // timed_out
+ // Stability: development
+ TestSuiteRunStatusTimedOut = TestSuiteRunStatusKey.String("timed_out")
+ // in_progress
+ // Stability: development
+ TestSuiteRunStatusInProgress = TestSuiteRunStatusKey.String("in_progress")
+)
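+
+// exampleTestRunAttributes is a minimal, illustrative sketch, not part of the
+// generated semantic conventions, showing how the test attributes and enum
+// values defined above could describe one passing test case in a successful
+// suite run; the suite and case names are hypothetical.
+func exampleTestRunAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		TestSuiteName("TestSuite1"),
+		TestSuiteRunStatusSuccess,
+		TestCaseName("org.example.TestCase1.test1"),
+		TestCaseResultStatusPass,
+	}
+}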
+
+// Namespace: thread
+const (
+ // ThreadIDKey is the attribute Key conforming to the "thread.id" semantic
+ // conventions. It represents the current "managed" thread ID (as opposed to OS
+ // thread ID).
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ ThreadIDKey = attribute.Key("thread.id")
+
+ // ThreadNameKey is the attribute Key conforming to the "thread.name" semantic
+ // conventions. It represents the current thread name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: main
+ ThreadNameKey = attribute.Key("thread.name")
+)
+
+// ThreadID returns an attribute KeyValue conforming to the "thread.id" semantic
+// conventions. It represents the current "managed" thread ID (as opposed to OS
+// thread ID).
+func ThreadID(val int) attribute.KeyValue {
+ return ThreadIDKey.Int(val)
+}
+
+// ThreadName returns an attribute KeyValue conforming to the "thread.name"
+// semantic conventions. It represents the current thread name.
+func ThreadName(val string) attribute.KeyValue {
+ return ThreadNameKey.String(val)
+}
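+
+// exampleThreadAttributes is a minimal, illustrative sketch, not part of the
+// generated semantic conventions, showing how the thread attributes defined
+// above could be attached to a span or log record; the values are hypothetical.
+func exampleThreadAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		ThreadID(42),
+		ThreadName("main"),
+	}
+}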
+
+// Namespace: tls
+const (
+ // TLSCipherKey is the attribute Key conforming to the "tls.cipher" semantic
+ // conventions. It represents the string indicating the [cipher] used during the
+ // current connection.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_RSA_WITH_3DES_EDE_CBC_SHA",
+ // "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"
+ // Note: The values allowed for `tls.cipher` MUST be one of the `Descriptions`
+ // of the [registered TLS Cipher Suites].
+ //
+ // [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+ // [registered TLS Cipher Suites]: https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#table-tls-parameters-4
+ TLSCipherKey = attribute.Key("tls.cipher")
+
+ // TLSClientCertificateKey is the attribute Key conforming to the
+ // "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the client. This is usually
+ // mutually-exclusive of `client.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSClientCertificateKey = attribute.Key("tls.client.certificate")
+
+ // TLSClientCertificateChainKey is the attribute Key conforming to the
+ // "tls.client.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // client. This is usually mutually-exclusive of `client.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSClientCertificateChainKey = attribute.Key("tls.client.certificate_chain")
+
+ // TLSClientHashMd5Key is the attribute Key conforming to the
+ // "tls.client.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSClientHashMd5Key = attribute.Key("tls.client.hash.md5")
+
+ // TLSClientHashSha1Key is the attribute Key conforming to the
+ // "tls.client.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSClientHashSha1Key = attribute.Key("tls.client.hash.sha1")
+
+ // TLSClientHashSha256Key is the attribute Key conforming to the
+ // "tls.client.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the client. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSClientHashSha256Key = attribute.Key("tls.client.hash.sha256")
+
+ // TLSClientIssuerKey is the attribute Key conforming to the "tls.client.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSClientIssuerKey = attribute.Key("tls.client.issuer")
+
+ // TLSClientJa3Key is the attribute Key conforming to the "tls.client.ja3"
+ // semantic conventions. It represents a hash that identifies clients based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSClientJa3Key = attribute.Key("tls.client.ja3")
+
+ // TLSClientNotAfterKey is the attribute Key conforming to the
+ // "tls.client.not_after" semantic conventions. It represents the date/time
+ // indicating when the client certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSClientNotAfterKey = attribute.Key("tls.client.not_after")
+
+ // TLSClientNotBeforeKey is the attribute Key conforming to the
+ // "tls.client.not_before" semantic conventions. It represents the date/time
+ // indicating when the client certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSClientNotBeforeKey = attribute.Key("tls.client.not_before")
+
+ // TLSClientSubjectKey is the attribute Key conforming to the
+ // "tls.client.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myclient, OU=Documentation Team, DC=example, DC=com"
+ TLSClientSubjectKey = attribute.Key("tls.client.subject")
+
+ // TLSClientSupportedCiphersKey is the attribute Key conforming to the
+ // "tls.client.supported_ciphers" semantic conventions. It represents the array
+ // of ciphers offered by the client during the client hello.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
+ // "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+ TLSClientSupportedCiphersKey = attribute.Key("tls.client.supported_ciphers")
+
+ // TLSCurveKey is the attribute Key conforming to the "tls.curve" semantic
+ // conventions. It represents the string indicating the curve used for the given
+ // cipher, when applicable.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "secp256r1"
+ TLSCurveKey = attribute.Key("tls.curve")
+
+ // TLSEstablishedKey is the attribute Key conforming to the "tls.established"
+ // semantic conventions. It represents the boolean flag indicating if the TLS
+ // negotiation was successful and transitioned to an encrypted tunnel.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSEstablishedKey = attribute.Key("tls.established")
+
+ // TLSNextProtocolKey is the attribute Key conforming to the "tls.next_protocol"
+ // semantic conventions. It represents the string indicating the protocol being
+ // tunneled. Per the values in the [IANA registry], this string should be lower
+ // case.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "http/1.1"
+ //
+ // [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+ TLSNextProtocolKey = attribute.Key("tls.next_protocol")
+
+ // TLSProtocolNameKey is the attribute Key conforming to the "tls.protocol.name"
+ // semantic conventions. It represents the normalized lowercase protocol name
+ // parsed from original string of the negotiated [SSL/TLS protocol version].
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolNameKey = attribute.Key("tls.protocol.name")
+
+ // TLSProtocolVersionKey is the attribute Key conforming to the
+ // "tls.protocol.version" semantic conventions. It represents the numeric part
+ // of the version parsed from the original string of the negotiated
+ // [SSL/TLS protocol version].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1.2", "3"
+ //
+ // [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+ TLSProtocolVersionKey = attribute.Key("tls.protocol.version")
+
+ // TLSResumedKey is the attribute Key conforming to the "tls.resumed" semantic
+ // conventions. It represents the boolean flag indicating if this TLS connection
+ // was resumed from an existing TLS negotiation.
+ //
+ // Type: boolean
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: true
+ TLSResumedKey = attribute.Key("tls.resumed")
+
+ // TLSServerCertificateKey is the attribute Key conforming to the
+ // "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+ // stand-alone certificate offered by the server. This is usually
+ // mutually-exclusive of `server.certificate_chain` since this value also exists
+ // in that list.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII..."
+ TLSServerCertificateKey = attribute.Key("tls.server.certificate")
+
+ // TLSServerCertificateChainKey is the attribute Key conforming to the
+ // "tls.server.certificate_chain" semantic conventions. It represents the array
+ // of PEM-encoded certificates that make up the certificate chain offered by the
+ // server. This is usually mutually-exclusive of `server.certificate` since that
+ // value should be the first certificate in the chain.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "MII...", "MI..."
+ TLSServerCertificateChainKey = attribute.Key("tls.server.certificate_chain")
+
+ // TLSServerHashMd5Key is the attribute Key conforming to the
+ // "tls.server.hash.md5" semantic conventions. It represents the certificate
+ // fingerprint using the MD5 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0F76C7F2C55BFD7D8E8B8F4BFBF0C9EC"
+ TLSServerHashMd5Key = attribute.Key("tls.server.hash.md5")
+
+ // TLSServerHashSha1Key is the attribute Key conforming to the
+ // "tls.server.hash.sha1" semantic conventions. It represents the certificate
+ // fingerprint using the SHA1 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9E393D93138888D288266C2D915214D1D1CCEB2A"
+ TLSServerHashSha1Key = attribute.Key("tls.server.hash.sha1")
+
+ // TLSServerHashSha256Key is the attribute Key conforming to the
+ // "tls.server.hash.sha256" semantic conventions. It represents the certificate
+ // fingerprint using the SHA256 digest of DER-encoded version of certificate
+ // offered by the server. For consistency with other hash values, this value
+ // should be formatted as an uppercase hash.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "0687F666A054EF17A08E2F2162EAB4CBC0D265E1D7875BE74BF3C712CA92DAF0"
+ TLSServerHashSha256Key = attribute.Key("tls.server.hash.sha256")
+
+ // TLSServerIssuerKey is the attribute Key conforming to the "tls.server.issuer"
+ // semantic conventions. It represents the distinguished name of [subject] of
+ // the issuer of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=Example Root CA, OU=Infrastructure Team, DC=example, DC=com"
+ //
+ // [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+ TLSServerIssuerKey = attribute.Key("tls.server.issuer")
+
+ // TLSServerJa3sKey is the attribute Key conforming to the "tls.server.ja3s"
+ // semantic conventions. It represents a hash that identifies servers based on
+ // how they perform an SSL/TLS handshake.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "d4e5b18d6b55c71272893221c96ba240"
+ TLSServerJa3sKey = attribute.Key("tls.server.ja3s")
+
+ // TLSServerNotAfterKey is the attribute Key conforming to the
+ // "tls.server.not_after" semantic conventions. It represents the date/time
+ // indicating when the server certificate is no longer considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "2021-01-01T00:00:00.000Z"
+ TLSServerNotAfterKey = attribute.Key("tls.server.not_after")
+
+ // TLSServerNotBeforeKey is the attribute Key conforming to the
+ // "tls.server.not_before" semantic conventions. It represents the date/time
+ // indicating when the server certificate is first considered valid.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "1970-01-01T00:00:00.000Z"
+ TLSServerNotBeforeKey = attribute.Key("tls.server.not_before")
+
+ // TLSServerSubjectKey is the attribute Key conforming to the
+ // "tls.server.subject" semantic conventions. It represents the distinguished
+ // name of subject of the x.509 certificate presented by the server.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "CN=myserver, OU=Documentation Team, DC=example, DC=com"
+ TLSServerSubjectKey = attribute.Key("tls.server.subject")
+)
+
+// TLSCipher returns an attribute KeyValue conforming to the "tls.cipher"
+// semantic conventions. It represents the string indicating the [cipher] used
+// during the current connection.
+//
+// [cipher]: https://datatracker.ietf.org/doc/html/rfc5246#appendix-A.5
+func TLSCipher(val string) attribute.KeyValue {
+ return TLSCipherKey.String(val)
+}
+
+// TLSClientCertificate returns an attribute KeyValue conforming to the
+// "tls.client.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the client. This is usually
+// mutually-exclusive of `client.certificate_chain` since this value also exists
+// in that list.
+func TLSClientCertificate(val string) attribute.KeyValue {
+ return TLSClientCertificateKey.String(val)
+}
+
+// TLSClientCertificateChain returns an attribute KeyValue conforming to the
+// "tls.client.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// client. This is usually mutually-exclusive of `client.certificate` since that
+// value should be the first certificate in the chain.
+func TLSClientCertificateChain(val ...string) attribute.KeyValue {
+ return TLSClientCertificateChainKey.StringSlice(val)
+}
+
+// TLSClientHashMd5 returns an attribute KeyValue conforming to the
+// "tls.client.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the client. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSClientHashMd5(val string) attribute.KeyValue {
+ return TLSClientHashMd5Key.String(val)
+}
+
+// TLSClientHashSha1 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha1(val string) attribute.KeyValue {
+ return TLSClientHashSha1Key.String(val)
+}
+
+// TLSClientHashSha256 returns an attribute KeyValue conforming to the
+// "tls.client.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the client. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSClientHashSha256(val string) attribute.KeyValue {
+ return TLSClientHashSha256Key.String(val)
+}
+
+// TLSClientIssuer returns an attribute KeyValue conforming to the
+// "tls.client.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the client.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSClientIssuer(val string) attribute.KeyValue {
+ return TLSClientIssuerKey.String(val)
+}
+
+// TLSClientJa3 returns an attribute KeyValue conforming to the "tls.client.ja3"
+// semantic conventions. It represents a hash that identifies clients based on
+// how they perform an SSL/TLS handshake.
+func TLSClientJa3(val string) attribute.KeyValue {
+ return TLSClientJa3Key.String(val)
+}
+
+// TLSClientNotAfter returns an attribute KeyValue conforming to the
+// "tls.client.not_after" semantic conventions. It represents the date/time
+// indicating when the client certificate is no longer considered valid.
+func TLSClientNotAfter(val string) attribute.KeyValue {
+ return TLSClientNotAfterKey.String(val)
+}
+
+// TLSClientNotBefore returns an attribute KeyValue conforming to the
+// "tls.client.not_before" semantic conventions. It represents the date/time
+// indicating when the client certificate is first considered valid.
+func TLSClientNotBefore(val string) attribute.KeyValue {
+ return TLSClientNotBeforeKey.String(val)
+}
+
+// TLSClientSubject returns an attribute KeyValue conforming to the
+// "tls.client.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the client.
+func TLSClientSubject(val string) attribute.KeyValue {
+ return TLSClientSubjectKey.String(val)
+}
+
+// TLSClientSupportedCiphers returns an attribute KeyValue conforming to the
+// "tls.client.supported_ciphers" semantic conventions. It represents the array
+// of ciphers offered by the client during the client hello.
+func TLSClientSupportedCiphers(val ...string) attribute.KeyValue {
+ return TLSClientSupportedCiphersKey.StringSlice(val)
+}
+
+// TLSCurve returns an attribute KeyValue conforming to the "tls.curve" semantic
+// conventions. It represents the string indicating the curve used for the given
+// cipher, when applicable.
+func TLSCurve(val string) attribute.KeyValue {
+ return TLSCurveKey.String(val)
+}
+
+// TLSEstablished returns an attribute KeyValue conforming to the
+// "tls.established" semantic conventions. It represents the boolean flag
+// indicating if the TLS negotiation was successful and transitioned to an
+// encrypted tunnel.
+func TLSEstablished(val bool) attribute.KeyValue {
+ return TLSEstablishedKey.Bool(val)
+}
+
+// TLSNextProtocol returns an attribute KeyValue conforming to the
+// "tls.next_protocol" semantic conventions. It represents the string indicating
+// the protocol being tunneled. Per the values in the [IANA registry], this
+// string should be lower case.
+//
+// [IANA registry]: https://www.iana.org/assignments/tls-extensiontype-values/tls-extensiontype-values.xhtml#alpn-protocol-ids
+func TLSNextProtocol(val string) attribute.KeyValue {
+ return TLSNextProtocolKey.String(val)
+}
+
+// TLSProtocolVersion returns an attribute KeyValue conforming to the
+// "tls.protocol.version" semantic conventions. It represents the numeric part of
+// the version parsed from the original string of the negotiated
+// [SSL/TLS protocol version].
+//
+// [SSL/TLS protocol version]: https://docs.openssl.org/1.1.1/man3/SSL_get_version/#return-values
+func TLSProtocolVersion(val string) attribute.KeyValue {
+ return TLSProtocolVersionKey.String(val)
+}
+
+// TLSResumed returns an attribute KeyValue conforming to the "tls.resumed"
+// semantic conventions. It represents the boolean flag indicating if this TLS
+// connection was resumed from an existing TLS negotiation.
+func TLSResumed(val bool) attribute.KeyValue {
+ return TLSResumedKey.Bool(val)
+}
+
+// TLSServerCertificate returns an attribute KeyValue conforming to the
+// "tls.server.certificate" semantic conventions. It represents the PEM-encoded
+// stand-alone certificate offered by the server. This is usually
+// mutually-exclusive of `server.certificate_chain` since this value also exists
+// in that list.
+func TLSServerCertificate(val string) attribute.KeyValue {
+ return TLSServerCertificateKey.String(val)
+}
+
+// TLSServerCertificateChain returns an attribute KeyValue conforming to the
+// "tls.server.certificate_chain" semantic conventions. It represents the array
+// of PEM-encoded certificates that make up the certificate chain offered by the
+// server. This is usually mutually-exclusive of `server.certificate` since that
+// value should be the first certificate in the chain.
+func TLSServerCertificateChain(val ...string) attribute.KeyValue {
+ return TLSServerCertificateChainKey.StringSlice(val)
+}
+
+// TLSServerHashMd5 returns an attribute KeyValue conforming to the
+// "tls.server.hash.md5" semantic conventions. It represents the certificate
+// fingerprint using the MD5 digest of DER-encoded version of certificate offered
+// by the server. For consistency with other hash values, this value should be
+// formatted as an uppercase hash.
+func TLSServerHashMd5(val string) attribute.KeyValue {
+ return TLSServerHashMd5Key.String(val)
+}
+
+// TLSServerHashSha1 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha1" semantic conventions. It represents the certificate
+// fingerprint using the SHA1 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha1(val string) attribute.KeyValue {
+ return TLSServerHashSha1Key.String(val)
+}
+
+// TLSServerHashSha256 returns an attribute KeyValue conforming to the
+// "tls.server.hash.sha256" semantic conventions. It represents the certificate
+// fingerprint using the SHA256 digest of DER-encoded version of certificate
+// offered by the server. For consistency with other hash values, this value
+// should be formatted as an uppercase hash.
+func TLSServerHashSha256(val string) attribute.KeyValue {
+ return TLSServerHashSha256Key.String(val)
+}
+
+// TLSServerIssuer returns an attribute KeyValue conforming to the
+// "tls.server.issuer" semantic conventions. It represents the distinguished name
+// of [subject] of the issuer of the x.509 certificate presented by the server.
+//
+// [subject]: https://datatracker.ietf.org/doc/html/rfc5280#section-4.1.2.6
+func TLSServerIssuer(val string) attribute.KeyValue {
+ return TLSServerIssuerKey.String(val)
+}
+
+// TLSServerJa3s returns an attribute KeyValue conforming to the
+// "tls.server.ja3s" semantic conventions. It represents a hash that identifies
+// servers based on how they perform an SSL/TLS handshake.
+func TLSServerJa3s(val string) attribute.KeyValue {
+ return TLSServerJa3sKey.String(val)
+}
+
+// TLSServerNotAfter returns an attribute KeyValue conforming to the
+// "tls.server.not_after" semantic conventions. It represents the date/time
+// indicating when the server certificate is no longer considered valid.
+func TLSServerNotAfter(val string) attribute.KeyValue {
+ return TLSServerNotAfterKey.String(val)
+}
+
+// TLSServerNotBefore returns an attribute KeyValue conforming to the
+// "tls.server.not_before" semantic conventions. It represents the date/time
+// indicating when the server certificate is first considered valid.
+func TLSServerNotBefore(val string) attribute.KeyValue {
+ return TLSServerNotBeforeKey.String(val)
+}
+
+// TLSServerSubject returns an attribute KeyValue conforming to the
+// "tls.server.subject" semantic conventions. It represents the distinguished
+// name of subject of the x.509 certificate presented by the server.
+func TLSServerSubject(val string) attribute.KeyValue {
+ return TLSServerSubjectKey.String(val)
+}
+
+// Enum values for tls.protocol.name
+var (
+ // ssl
+ // Stability: development
+ TLSProtocolNameSsl = TLSProtocolNameKey.String("ssl")
+ // tls
+ // Stability: development
+ TLSProtocolNameTLS = TLSProtocolNameKey.String("tls")
+)
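+
+// exampleTLSConnectionAttributes is a minimal, illustrative sketch, not part of
+// the generated semantic conventions, showing how the TLS attributes and enum
+// values defined above could describe one negotiated connection; the values
+// are hypothetical.
+func exampleTLSConnectionAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		TLSProtocolNameTLS,
+		TLSProtocolVersion("1.2"),
+		TLSCipher("TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256"),
+		TLSCurve("secp256r1"),
+		TLSEstablished(true),
+		TLSResumed(false),
+		TLSServerSubject("CN=myserver, OU=Documentation Team, DC=example, DC=com"),
+	}
+}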
+
+// Namespace: url
+const (
+ // URLDomainKey is the attribute Key conforming to the "url.domain" semantic
+ // conventions. It represents the domain extracted from the `url.full`, such as
+ // "opentelemetry.io".
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "www.foo.bar", "opentelemetry.io", "3.12.167.2",
+ // "[1080:0:0:0:8:800:200C:417A]"
+ // Note: In some cases a URL may refer to an IP and/or port directly, without a
+ // domain name. In this case, the IP address would go to the domain field. If
+ // the URL contains a [literal IPv6 address] enclosed by `[` and `]`, the `[`
+ // and `]` characters should also be captured in the domain field.
+ //
+ // [literal IPv6 address]: https://www.rfc-editor.org/rfc/rfc2732#section-2
+ URLDomainKey = attribute.Key("url.domain")
+
+ // URLExtensionKey is the attribute Key conforming to the "url.extension"
+ // semantic conventions. It represents the file extension extracted from the
+ // `url.full`, excluding the leading dot.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "png", "gz"
+ // Note: The file extension is only set if it exists, as not every url has a
+ // file extension. When the file name has multiple extensions `example.tar.gz`,
+ // only the last one should be captured `gz`, not `tar.gz`.
+ URLExtensionKey = attribute.Key("url.extension")
+
+ // URLFragmentKey is the attribute Key conforming to the "url.fragment" semantic
+ // conventions. It represents the [URI fragment] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "SemConv"
+ //
+ // [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+ URLFragmentKey = attribute.Key("url.fragment")
+
+ // URLFullKey is the attribute Key conforming to the "url.full" semantic
+ // conventions. It represents the absolute URL describing a network resource
+ // according to [RFC3986].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv", "//localhost"
+ // Note: For network calls, URL usually has
+ // `scheme://host[:port][path][?query][#fragment]` format, where the fragment
+ // is not transmitted over HTTP, but if it is known, it SHOULD be included
+ // nevertheless.
+ //
+ // `url.full` MUST NOT contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`.
+ // In such case username and password SHOULD be redacted and attribute's value
+ // SHOULD be `https://REDACTED:REDACTED@www.example.com/`.
+ //
+ // `url.full` SHOULD capture the absolute URL when it is available (or can be
+ // reconstructed).
+ //
+ // Sensitive content provided in `url.full` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the
+ // value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `https://www.example.com/path?color=blue&sig=REDACTED`.
+ //
+ // [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLFullKey = attribute.Key("url.full")
+
+ // URLOriginalKey is the attribute Key conforming to the "url.original" semantic
+ // conventions. It represents the unmodified original URL as seen in the event
+ // source.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "https://www.foo.bar/search?q=OpenTelemetry#SemConv",
+ // "search?q=OpenTelemetry"
+ // Note: In network monitoring, the observed URL may be a full URL, whereas in
+ // access logs, the URL is often just represented as a path. This field is meant
+ // to represent the URL as it was observed, complete or not.
+ // `url.original` might contain credentials passed via URL in form of
+ // `https://username:password@www.example.com/`. In such case password and
+ // username SHOULD NOT be redacted and attribute's value SHOULD remain the same.
+ URLOriginalKey = attribute.Key("url.original")
+
+ // URLPathKey is the attribute Key conforming to the "url.path" semantic
+ // conventions. It represents the [URI path] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "/search"
+ // Note: Sensitive content provided in `url.path` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ // [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+ URLPathKey = attribute.Key("url.path")
+
+ // URLPortKey is the attribute Key conforming to the "url.port" semantic
+ // conventions. It represents the port extracted from the `url.full`.
+ //
+ // Type: int
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: 443
+ URLPortKey = attribute.Key("url.port")
+
+ // URLQueryKey is the attribute Key conforming to the "url.query" semantic
+ // conventions. It represents the [URI query] component.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "q=OpenTelemetry"
+ // Note: Sensitive content provided in `url.query` SHOULD be scrubbed when
+ // instrumentations can identify it.
+ //
+ //
+ // Query string values for the following keys SHOULD be redacted by default and
+ // replaced by the value `REDACTED`:
+ //
+ // - [`AWSAccessKeyId`]
+ // - [`Signature`]
+ // - [`sig`]
+ // - [`X-Goog-Signature`]
+ //
+ // This list is subject to change over time.
+ //
+ // When a query string value is redacted, the query string key SHOULD still be
+ // preserved, e.g.
+ // `q=OpenTelemetry&sig=REDACTED`.
+ //
+ // [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+ // [`AWSAccessKeyId`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`Signature`]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/RESTAuthentication.html#RESTAuthenticationQueryStringAuth
+ // [`sig`]: https://learn.microsoft.com/azure/storage/common/storage-sas-overview#sas-token
+ // [`X-Goog-Signature`]: https://cloud.google.com/storage/docs/access-control/signed-urls
+ URLQueryKey = attribute.Key("url.query")
+
+ // URLRegisteredDomainKey is the attribute Key conforming to the
+ // "url.registered_domain" semantic conventions. It represents the highest
+ // registered url domain, stripped of the subdomain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "example.com", "foo.co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ // For example, the registered domain for `foo.example.com` is `example.com`.
+ // Trying to approximate this by simply taking the last two labels will not work
+ // well for TLDs such as `co.uk`.
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLRegisteredDomainKey = attribute.Key("url.registered_domain")
+
+ // URLSchemeKey is the attribute Key conforming to the "url.scheme" semantic
+ // conventions. It represents the [URI scheme] component identifying the used
+ // protocol.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "https", "ftp", "telnet"
+ //
+ // [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+ URLSchemeKey = attribute.Key("url.scheme")
+
+ // URLSubdomainKey is the attribute Key conforming to the "url.subdomain"
+ // semantic conventions. It represents the subdomain portion of a fully
+ // qualified domain name, which includes all of the names except the host name under
+ // the registered_domain. In a partially qualified domain, or if the
+ // qualification level of the full name cannot be determined, subdomain contains
+ // all of the names below the registered domain.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "east", "sub2.sub1"
+ // Note: The subdomain portion of `www.east.mydomain.co.uk` is `east`. If the
+ // domain has multiple levels of subdomain, such as `sub2.sub1.example.com`, the
+ // subdomain field should contain `sub2.sub1`, with no trailing period.
+ URLSubdomainKey = attribute.Key("url.subdomain")
+
+ // URLTemplateKey is the attribute Key conforming to the "url.template" semantic
+ // conventions. It represents the low-cardinality template of an
+ // [absolute path reference].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "/users/{id}", "/users/:id", "/users?id={id}"
+ //
+ // [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+ URLTemplateKey = attribute.Key("url.template")
+
+ // URLTopLevelDomainKey is the attribute Key conforming to the
+ // "url.top_level_domain" semantic conventions. It represents the effective top
+ // level domain (eTLD), also known as the domain suffix, is the last part of the
+ // level domain (eTLD), also known as the domain suffix, which is the last part of the
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "com", "co.uk"
+ // Note: This value can be determined precisely with the [public suffix list].
+ //
+ // [public suffix list]: https://publicsuffix.org/
+ URLTopLevelDomainKey = attribute.Key("url.top_level_domain")
+)
+
+// URLDomain returns an attribute KeyValue conforming to the "url.domain"
+// semantic conventions. It represents the domain extracted from the `url.full`,
+// such as "opentelemetry.io".
+func URLDomain(val string) attribute.KeyValue {
+ return URLDomainKey.String(val)
+}
+
+// URLExtension returns an attribute KeyValue conforming to the "url.extension"
+// semantic conventions. It represents the file extension extracted from the
+// `url.full`, excluding the leading dot.
+func URLExtension(val string) attribute.KeyValue {
+ return URLExtensionKey.String(val)
+}
+
+// URLFragment returns an attribute KeyValue conforming to the "url.fragment"
+// semantic conventions. It represents the [URI fragment] component.
+//
+// [URI fragment]: https://www.rfc-editor.org/rfc/rfc3986#section-3.5
+func URLFragment(val string) attribute.KeyValue {
+ return URLFragmentKey.String(val)
+}
+
+// URLFull returns an attribute KeyValue conforming to the "url.full" semantic
+// conventions. It represents the absolute URL describing a network resource
+// according to [RFC3986].
+//
+// [RFC3986]: https://www.rfc-editor.org/rfc/rfc3986
+func URLFull(val string) attribute.KeyValue {
+ return URLFullKey.String(val)
+}
+
+// URLOriginal returns an attribute KeyValue conforming to the "url.original"
+// semantic conventions. It represents the unmodified original URL as seen in the
+// event source.
+func URLOriginal(val string) attribute.KeyValue {
+ return URLOriginalKey.String(val)
+}
+
+// URLPath returns an attribute KeyValue conforming to the "url.path" semantic
+// conventions. It represents the [URI path] component.
+//
+// [URI path]: https://www.rfc-editor.org/rfc/rfc3986#section-3.3
+func URLPath(val string) attribute.KeyValue {
+ return URLPathKey.String(val)
+}
+
+// URLPort returns an attribute KeyValue conforming to the "url.port" semantic
+// conventions. It represents the port extracted from the `url.full`.
+func URLPort(val int) attribute.KeyValue {
+ return URLPortKey.Int(val)
+}
+
+// URLQuery returns an attribute KeyValue conforming to the "url.query" semantic
+// conventions. It represents the [URI query] component.
+//
+// [URI query]: https://www.rfc-editor.org/rfc/rfc3986#section-3.4
+func URLQuery(val string) attribute.KeyValue {
+ return URLQueryKey.String(val)
+}
+
+// URLRegisteredDomain returns an attribute KeyValue conforming to the
+// "url.registered_domain" semantic conventions. It represents the highest
+// registered url domain, stripped of the subdomain.
+func URLRegisteredDomain(val string) attribute.KeyValue {
+ return URLRegisteredDomainKey.String(val)
+}
+
+// URLScheme returns an attribute KeyValue conforming to the "url.scheme"
+// semantic conventions. It represents the [URI scheme] component identifying the
+// used protocol.
+//
+// [URI scheme]: https://www.rfc-editor.org/rfc/rfc3986#section-3.1
+func URLScheme(val string) attribute.KeyValue {
+ return URLSchemeKey.String(val)
+}
+
+// URLSubdomain returns an attribute KeyValue conforming to the "url.subdomain"
+// semantic conventions. It represents the subdomain portion of a fully qualified
+// domain name, which includes all of the names except the host name under the
+// registered_domain. In a partially qualified domain, or if the qualification
+// level of the full name cannot be determined, subdomain contains all of the
+// names below the registered domain.
+func URLSubdomain(val string) attribute.KeyValue {
+ return URLSubdomainKey.String(val)
+}
+
+// URLTemplate returns an attribute KeyValue conforming to the "url.template"
+// semantic conventions. It represents the low-cardinality template of an
+// [absolute path reference].
+//
+// [absolute path reference]: https://www.rfc-editor.org/rfc/rfc3986#section-4.2
+func URLTemplate(val string) attribute.KeyValue {
+ return URLTemplateKey.String(val)
+}
+
+// URLTopLevelDomain returns an attribute KeyValue conforming to the
+// "url.top_level_domain" semantic conventions. It represents the effective top
+// level domain (eTLD), also known as the domain suffix, which is the last part of the
+// domain name. For example, the top level domain for example.com is `com`.
+func URLTopLevelDomain(val string) attribute.KeyValue {
+ return URLTopLevelDomainKey.String(val)
+}
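+
+// exampleURLAttributes is a minimal, illustrative sketch, not part of the
+// generated semantic conventions, showing how one URL could be decomposed into
+// the url attributes defined above; the URL itself is hypothetical.
+func exampleURLAttributes() []attribute.KeyValue {
+	// Decomposition of https://www.foo.bar:443/search?q=OpenTelemetry#SemConv
+	return []attribute.KeyValue{
+		URLFull("https://www.foo.bar:443/search?q=OpenTelemetry#SemConv"),
+		URLScheme("https"),
+		URLDomain("www.foo.bar"),
+		URLPort(443),
+		URLPath("/search"),
+		URLQuery("q=OpenTelemetry"),
+		URLFragment("SemConv"),
+	}
+}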
+
+// Namespace: user
+const (
+ // UserEmailKey is the attribute Key conforming to the "user.email" semantic
+ // conventions. It represents the user email address.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein@example.com"
+ UserEmailKey = attribute.Key("user.email")
+
+ // UserFullNameKey is the attribute Key conforming to the "user.full_name"
+ // semantic conventions. It represents the user's full name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Albert Einstein"
+ UserFullNameKey = attribute.Key("user.full_name")
+
+ // UserHashKey is the attribute Key conforming to the "user.hash" semantic
+ // conventions. It represents the unique user hash to correlate information for
+ // a user in anonymized form.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "364fc68eaf4c8acec74a4e52d7d1feaa"
+ // Note: Useful if `user.id` or `user.name` contain confidential information and
+ // cannot be used.
+ UserHashKey = attribute.Key("user.hash")
+
+ // UserIDKey is the attribute Key conforming to the "user.id" semantic
+ // conventions. It represents the unique identifier of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "S-1-5-21-202424912787-2692429404-2351956786-1000"
+ UserIDKey = attribute.Key("user.id")
+
+ // UserNameKey is the attribute Key conforming to the "user.name" semantic
+ // conventions. It represents the short name or login/username of the user.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "a.einstein"
+ UserNameKey = attribute.Key("user.name")
+
+ // UserRolesKey is the attribute Key conforming to the "user.roles" semantic
+ // conventions. It represents the array of user roles at the time of the event.
+ //
+ // Type: string[]
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "admin", "reporting_user"
+ UserRolesKey = attribute.Key("user.roles")
+)
+
+// UserEmail returns an attribute KeyValue conforming to the "user.email"
+// semantic conventions. It represents the user email address.
+func UserEmail(val string) attribute.KeyValue {
+ return UserEmailKey.String(val)
+}
+
+// UserFullName returns an attribute KeyValue conforming to the "user.full_name"
+// semantic conventions. It represents the user's full name.
+func UserFullName(val string) attribute.KeyValue {
+ return UserFullNameKey.String(val)
+}
+
+// UserHash returns an attribute KeyValue conforming to the "user.hash" semantic
+// conventions. It represents the unique user hash to correlate information for a
+// user in anonymized form.
+func UserHash(val string) attribute.KeyValue {
+ return UserHashKey.String(val)
+}
+
+// UserID returns an attribute KeyValue conforming to the "user.id" semantic
+// conventions. It represents the unique identifier of the user.
+func UserID(val string) attribute.KeyValue {
+ return UserIDKey.String(val)
+}
+
+// UserName returns an attribute KeyValue conforming to the "user.name" semantic
+// conventions. It represents the short name or login/username of the user.
+func UserName(val string) attribute.KeyValue {
+ return UserNameKey.String(val)
+}
+
+// UserRoles returns an attribute KeyValue conforming to the "user.roles"
+// semantic conventions. It represents the array of user roles at the time of the
+// event.
+func UserRoles(val ...string) attribute.KeyValue {
+ return UserRolesKey.StringSlice(val)
+}
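+
+// exampleUserAttributes is a minimal, illustrative sketch, not part of the
+// generated semantic conventions, showing how the user attributes defined
+// above could describe an authenticated user; the values are hypothetical.
+func exampleUserAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		UserID("S-1-5-21-202424912787-2692429404-2351956786-1000"),
+		UserName("a.einstein"),
+		UserEmail("a.einstein@example.com"),
+		UserRoles("admin", "reporting_user"),
+	}
+}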
+
+// Namespace: user_agent
+const (
+ // UserAgentNameKey is the attribute Key conforming to the "user_agent.name"
+ // semantic conventions. It represents the name of the user-agent extracted from
+ // original. Usually refers to the browser's name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Safari", "YourApp"
+ // Note: [Example] of extracting browser's name from original string. In the
+ // case of using a user-agent for non-browser products, such as microservices
+ // with multiple names/versions inside the `user_agent.original`, the most
+ // significant name SHOULD be selected. In such a scenario it should align with
+ // `user_agent.version`
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentNameKey = attribute.Key("user_agent.name")
+
+ // UserAgentOriginalKey is the attribute Key conforming to the
+ // "user_agent.original" semantic conventions. It represents the value of the
+ // [HTTP User-Agent] header sent by the client.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Stable
+ //
+ // Examples: "CERN-LineMode/2.15 libwww/2.17b3", "Mozilla/5.0 (iPhone; CPU
+ // iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko)
+ // Version/14.1.2 Mobile/15E148 Safari/604.1", "YourApp/1.0.0
+ // grpc-java-okhttp/1.27.2"
+ //
+ // [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+ UserAgentOriginalKey = attribute.Key("user_agent.original")
+
+ // UserAgentOSNameKey is the attribute Key conforming to the
+ // "user_agent.os.name" semantic conventions. It represents the human readable
+ // operating system name.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "iOS", "Android", "Ubuntu"
+ // Note: For mapping user agent strings to OS names, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSNameKey = attribute.Key("user_agent.os.name")
+
+ // UserAgentOSVersionKey is the attribute Key conforming to the
+ // "user_agent.os.version" semantic conventions. It represents the version
+ // string of the operating system as defined in [Version Attributes].
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.2.1", "18.04.1"
+ // Note: For mapping user agent strings to OS versions, libraries such as
+ // [ua-parser] can be utilized.
+ //
+ // [Version Attributes]: /docs/resource/README.md#version-attributes
+ // [ua-parser]: https://github.com/ua-parser
+ UserAgentOSVersionKey = attribute.Key("user_agent.os.version")
+
+ // UserAgentSyntheticTypeKey is the attribute Key conforming to the
+ // "user_agent.synthetic.type" semantic conventions. It represents the category
+ // of synthetic traffic, such as tests or bots.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // Note: This attribute MAY be derived from the contents of the
+ // `user_agent.original` attribute. Components that populate the attribute are
+ // responsible for determining what they consider to be synthetic bot or test
+ // traffic. This attribute can either be set for self-identification purposes,
+ // or on telemetry detected to be generated as a result of a synthetic request.
+ // This attribute is useful for distinguishing between genuine client traffic
+ // and synthetic traffic generated by bots or tests.
+ UserAgentSyntheticTypeKey = attribute.Key("user_agent.synthetic.type")
+
+ // UserAgentVersionKey is the attribute Key conforming to the
+ // "user_agent.version" semantic conventions. It represents the version of the
+ // user-agent extracted from original. Usually refers to the browser's version.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "14.1.2", "1.0.0"
+ // Note: [Example] of extracting browser's version from original string. In the
+ // case of using a user-agent for non-browser products, such as microservices
+ // with multiple names/versions inside the `user_agent.original`, the most
+ // significant version SHOULD be selected. In such a scenario it should align
+ // with `user_agent.name`
+ //
+ // [Example]: https://www.whatsmyua.info
+ UserAgentVersionKey = attribute.Key("user_agent.version")
+)
+
+// UserAgentName returns an attribute KeyValue conforming to the
+// "user_agent.name" semantic conventions. It represents the name of the
+// user-agent extracted from original. Usually refers to the browser's name.
+func UserAgentName(val string) attribute.KeyValue {
+ return UserAgentNameKey.String(val)
+}
+
+// UserAgentOriginal returns an attribute KeyValue conforming to the
+// "user_agent.original" semantic conventions. It represents the value of the
+// [HTTP User-Agent] header sent by the client.
+//
+// [HTTP User-Agent]: https://www.rfc-editor.org/rfc/rfc9110.html#field.user-agent
+func UserAgentOriginal(val string) attribute.KeyValue {
+ return UserAgentOriginalKey.String(val)
+}
+
+// UserAgentOSName returns an attribute KeyValue conforming to the
+// "user_agent.os.name" semantic conventions. It represents the human readable
+// operating system name.
+func UserAgentOSName(val string) attribute.KeyValue {
+ return UserAgentOSNameKey.String(val)
+}
+
+// UserAgentOSVersion returns an attribute KeyValue conforming to the
+// "user_agent.os.version" semantic conventions. It represents the version string
+// of the operating system as defined in [Version Attributes].
+//
+// [Version Attributes]: /docs/resource/README.md#version-attributes
+func UserAgentOSVersion(val string) attribute.KeyValue {
+ return UserAgentOSVersionKey.String(val)
+}
+
+// UserAgentVersion returns an attribute KeyValue conforming to the
+// "user_agent.version" semantic conventions. It represents the version of the
+// user-agent extracted from original. Usually refers to the browser's version.
+func UserAgentVersion(val string) attribute.KeyValue {
+ return UserAgentVersionKey.String(val)
+}
+
+// Enum values for user_agent.synthetic.type
+var (
+ // Bot source.
+ // Stability: development
+ UserAgentSyntheticTypeBot = UserAgentSyntheticTypeKey.String("bot")
+ // Synthetic test source.
+ // Stability: development
+ UserAgentSyntheticTypeTest = UserAgentSyntheticTypeKey.String("test")
+)
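+
+// exampleUserAgentAttributes is a minimal, illustrative sketch, not part of the
+// generated semantic conventions, showing how the user_agent attributes
+// defined above could describe a browser request; the values are hypothetical.
+func exampleUserAgentAttributes() []attribute.KeyValue {
+	return []attribute.KeyValue{
+		UserAgentOriginal("Mozilla/5.0 (iPhone; CPU iPhone OS 14_7_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.1.2 Mobile/15E148 Safari/604.1"),
+		UserAgentName("Safari"),
+		UserAgentVersion("14.1.2"),
+		UserAgentOSName("iOS"),
+		UserAgentOSVersion("14.7.1"),
+	}
+}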
+
+// Namespace: vcs
+const (
+ // VCSChangeIDKey is the attribute Key conforming to the "vcs.change.id"
+ // semantic conventions. It represents the ID of the change (pull request/merge
+ // request/changelist) if applicable. This is usually a unique (within
+ // repository) identifier generated by the VCS system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "123"
+ VCSChangeIDKey = attribute.Key("vcs.change.id")
+
+ // VCSChangeStateKey is the attribute Key conforming to the "vcs.change.state"
+ // semantic conventions. It represents the state of the change (pull
+ // request/merge request/changelist).
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "open", "closed", "merged"
+ VCSChangeStateKey = attribute.Key("vcs.change.state")
+
+ // VCSChangeTitleKey is the attribute Key conforming to the "vcs.change.title"
+ // semantic conventions. It represents the human readable title of the change
+ // (pull request/merge request/changelist). This title is often a brief summary
+ // of the change and may get merged into a ref as the commit summary.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "Fixes broken thing", "feat: add my new feature", "[chore] update
+ // dependency"
+ VCSChangeTitleKey = attribute.Key("vcs.change.title")
+
+ // VCSLineChangeTypeKey is the attribute Key conforming to the
+ // "vcs.line_change.type" semantic conventions. It represents the type of line
+ // change being measured on a branch or change.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "added", "removed"
+ VCSLineChangeTypeKey = attribute.Key("vcs.line_change.type")
+
+ // VCSOwnerNameKey is the attribute Key conforming to the "vcs.owner.name"
+ // semantic conventions. It represents the group owner within the version
+ // control system.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-org", "myteam", "business-unit"
+ VCSOwnerNameKey = attribute.Key("vcs.owner.name")
+
+ // VCSProviderNameKey is the attribute Key conforming to the "vcs.provider.name"
+ // semantic conventions. It represents the name of the version control system
+ // provider.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "github", "gitlab", "gitea", "bitbucket"
+ VCSProviderNameKey = attribute.Key("vcs.provider.name")
+
+ // VCSRefBaseNameKey is the attribute Key conforming to the "vcs.ref.base.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseNameKey = attribute.Key("vcs.ref.base.name")
+
+ // VCSRefBaseRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.base.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits. The
+ // revision can be a full [hash value (see
+ // glossary)],
+ // of the recorded change to a ref within a repository pointing to a
+ // commit [commit] object. It does
+ // not necessarily have to be a hash; it can simply define a [revision
+ // number]
+ // which is an integer that is monotonically increasing. In cases where
+ // it is identical to the `ref.base.name`, it SHOULD still be included.
+ // It is up to the implementer to decide which value to set as the
+ // revision based on the VCS system and situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see
+ // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision
+ // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefBaseRevisionKey = attribute.Key("vcs.ref.base.revision")
+
+ // VCSRefBaseTypeKey is the attribute Key conforming to the "vcs.ref.base.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `base` refers to the starting point of a change. For example, `main`
+ // would be the base reference of type branch if you've created a new
+ // reference of type branch from it and created new commits.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefBaseTypeKey = attribute.Key("vcs.ref.base.type")
+
+ // VCSRefHeadNameKey is the attribute Key conforming to the "vcs.ref.head.name"
+ // semantic conventions. It represents the name of the [reference] such as
+ // **branch** or **tag** in the repository.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "my-feature-branch", "tag-1-test"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadNameKey = attribute.Key("vcs.ref.head.name")
+
+ // VCSRefHeadRevisionKey is the attribute Key conforming to the
+ // "vcs.ref.head.revision" semantic conventions. It represents the revision,
+	// literally [revised version]. The revision most often refers to a commit
+ // object in Git, or a revision number in SVN.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "9d59409acf479dfa0df1aa568182e43e43df8bbe28d60fcf2bc52e30068802cc",
+ // "main", "123", "HEAD"
+ // Note: `head` refers to where you are right now; the current reference at a
+	// given time. The revision can be a full [hash value (see
+ // glossary)],
+ // of the recorded change to a ref within a repository pointing to a
+ // commit [commit] object. It does
+ // not necessarily have to be a hash; it can simply define a [revision
+ // number]
+ // which is an integer that is monotonically increasing. In cases where
+ // it is identical to the `ref.head.name`, it SHOULD still be included.
+ // It is up to the implementer to decide which value to set as the
+ // revision based on the VCS system and situational context.
+ //
+ // [revised version]: https://www.merriam-webster.com/dictionary/revision
+ // [hash value (see
+ // glossary)]: https://nvlpubs.nist.gov/nistpubs/FIPS/NIST.FIPS.186-5.pdf
+ // [commit]: https://git-scm.com/docs/git-commit
+ // [revision
+ // number]: https://svnbook.red-bean.com/en/1.7/svn.tour.revs.specifiers.html
+ VCSRefHeadRevisionKey = attribute.Key("vcs.ref.head.revision")
+
+ // VCSRefHeadTypeKey is the attribute Key conforming to the "vcs.ref.head.type"
+ // semantic conventions. It represents the type of the [reference] in the
+ // repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ // Note: `head` refers to where you are right now; the current reference at a
+ // given time.
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefHeadTypeKey = attribute.Key("vcs.ref.head.type")
+
+ // VCSRefTypeKey is the attribute Key conforming to the "vcs.ref.type" semantic
+ // conventions. It represents the type of the [reference] in the repository.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "branch", "tag"
+ //
+ // [reference]: https://git-scm.com/docs/gitglossary#def_ref
+ VCSRefTypeKey = attribute.Key("vcs.ref.type")
+
+ // VCSRepositoryNameKey is the attribute Key conforming to the
+ // "vcs.repository.name" semantic conventions. It represents the human readable
+ // name of the repository. It SHOULD NOT include any additional identifier like
+ // Group/SubGroup in GitLab or organization in GitHub.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "semantic-conventions", "my-cool-repo"
+ // Note: Due to it only being the name, it can clash with forks of the same
+ // repository if collecting telemetry across multiple orgs or groups in
+ // the same backends.
+ VCSRepositoryNameKey = attribute.Key("vcs.repository.name")
+
+ // VCSRepositoryURLFullKey is the attribute Key conforming to the
+ // "vcs.repository.url.full" semantic conventions. It represents the
+ // [canonical URL] of the repository providing the complete HTTP(S) address in
+ // order to locate and identify the repository through a browser.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples:
+ // "https://github.com/opentelemetry/open-telemetry-collector-contrib",
+ // "https://gitlab.com/my-org/my-project/my-projects-project/repo"
+ // Note: In Git Version Control Systems, the canonical URL SHOULD NOT include
+ // the `.git` extension.
+ //
+ // [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+ VCSRepositoryURLFullKey = attribute.Key("vcs.repository.url.full")
+
+ // VCSRevisionDeltaDirectionKey is the attribute Key conforming to the
+ // "vcs.revision_delta.direction" semantic conventions. It represents the type
+ // of revision comparison.
+ //
+ // Type: Enum
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "ahead", "behind"
+ VCSRevisionDeltaDirectionKey = attribute.Key("vcs.revision_delta.direction")
+)
+
+// VCSChangeID returns an attribute KeyValue conforming to the "vcs.change.id"
+// semantic conventions. It represents the ID of the change (pull request/merge
+// request/changelist) if applicable. This is usually a unique (within
+// repository) identifier generated by the VCS system.
+func VCSChangeID(val string) attribute.KeyValue {
+ return VCSChangeIDKey.String(val)
+}
+
+// VCSChangeTitle returns an attribute KeyValue conforming to the
+// "vcs.change.title" semantic conventions. It represents the human readable
+// title of the change (pull request/merge request/changelist). This title is
+// often a brief summary of the change and may get merged into a ref as the
+// commit summary.
+func VCSChangeTitle(val string) attribute.KeyValue {
+ return VCSChangeTitleKey.String(val)
+}
+
+// VCSOwnerName returns an attribute KeyValue conforming to the "vcs.owner.name"
+// semantic conventions. It represents the group owner within the version control
+// system.
+func VCSOwnerName(val string) attribute.KeyValue {
+ return VCSOwnerNameKey.String(val)
+}
+
+// VCSRefBaseName returns an attribute KeyValue conforming to the
+// "vcs.ref.base.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefBaseName(val string) attribute.KeyValue {
+ return VCSRefBaseNameKey.String(val)
+}
+
+// VCSRefBaseRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.base.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefBaseRevision(val string) attribute.KeyValue {
+ return VCSRefBaseRevisionKey.String(val)
+}
+
+// VCSRefHeadName returns an attribute KeyValue conforming to the
+// "vcs.ref.head.name" semantic conventions. It represents the name of the
+// [reference] such as **branch** or **tag** in the repository.
+//
+// [reference]: https://git-scm.com/docs/gitglossary#def_ref
+func VCSRefHeadName(val string) attribute.KeyValue {
+ return VCSRefHeadNameKey.String(val)
+}
+
+// VCSRefHeadRevision returns an attribute KeyValue conforming to the
+// "vcs.ref.head.revision" semantic conventions. It represents the revision,
+// literally [revised version]. The revision most often refers to a commit object
+// in Git, or a revision number in SVN.
+//
+// [revised version]: https://www.merriam-webster.com/dictionary/revision
+func VCSRefHeadRevision(val string) attribute.KeyValue {
+ return VCSRefHeadRevisionKey.String(val)
+}
+
+// VCSRepositoryName returns an attribute KeyValue conforming to the
+// "vcs.repository.name" semantic conventions. It represents the human readable
+// name of the repository. It SHOULD NOT include any additional identifier like
+// Group/SubGroup in GitLab or organization in GitHub.
+func VCSRepositoryName(val string) attribute.KeyValue {
+ return VCSRepositoryNameKey.String(val)
+}
+
+// VCSRepositoryURLFull returns an attribute KeyValue conforming to the
+// "vcs.repository.url.full" semantic conventions. It represents the
+// [canonical URL] of the repository providing the complete HTTP(S) address in
+// order to locate and identify the repository through a browser.
+//
+// [canonical URL]: https://support.google.com/webmasters/answer/10347851?hl=en#:~:text=A%20canonical%20URL%20is%20the,Google%20chooses%20one%20as%20canonical.
+func VCSRepositoryURLFull(val string) attribute.KeyValue {
+ return VCSRepositoryURLFullKey.String(val)
+}
+
+// Enum values for vcs.change.state
+var (
+ // Open means the change is currently active and under review. It hasn't been
+ // merged into the target branch yet, and it's still possible to make changes or
+ // add comments.
+ // Stability: development
+ VCSChangeStateOpen = VCSChangeStateKey.String("open")
+ // WIP (work-in-progress, draft) means the change is still in progress and not
+ // yet ready for a full review. It might still undergo significant changes.
+ // Stability: development
+ VCSChangeStateWip = VCSChangeStateKey.String("wip")
+ // Closed means the merge request has been closed without merging. This can
+ // happen for various reasons, such as the changes being deemed unnecessary, the
+ // issue being resolved in another way, or the author deciding to withdraw the
+ // request.
+ // Stability: development
+ VCSChangeStateClosed = VCSChangeStateKey.String("closed")
+ // Merged indicates that the change has been successfully integrated into the
+ // target codebase.
+ // Stability: development
+ VCSChangeStateMerged = VCSChangeStateKey.String("merged")
+)
+
+// Enum values for vcs.line_change.type
+var (
+ // How many lines were added.
+ // Stability: development
+ VCSLineChangeTypeAdded = VCSLineChangeTypeKey.String("added")
+ // How many lines were removed.
+ // Stability: development
+ VCSLineChangeTypeRemoved = VCSLineChangeTypeKey.String("removed")
+)
+
+// Enum values for vcs.provider.name
+var (
+ // [GitHub]
+ // Stability: development
+ //
+ // [GitHub]: https://github.com
+ VCSProviderNameGithub = VCSProviderNameKey.String("github")
+ // [GitLab]
+ // Stability: development
+ //
+ // [GitLab]: https://gitlab.com
+ VCSProviderNameGitlab = VCSProviderNameKey.String("gitlab")
+ // [Gitea]
+ // Stability: development
+ //
+ // [Gitea]: https://gitea.io
+ VCSProviderNameGitea = VCSProviderNameKey.String("gitea")
+ // [Bitbucket]
+ // Stability: development
+ //
+ // [Bitbucket]: https://bitbucket.org
+ VCSProviderNameBitbucket = VCSProviderNameKey.String("bitbucket")
+)
+
+// Enum values for vcs.ref.base.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefBaseTypeBranch = VCSRefBaseTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefBaseTypeTag = VCSRefBaseTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.head.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefHeadTypeBranch = VCSRefHeadTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefHeadTypeTag = VCSRefHeadTypeKey.String("tag")
+)
+
+// Enum values for vcs.ref.type
+var (
+ // [branch]
+ // Stability: development
+ //
+ // [branch]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefbranchabranch
+ VCSRefTypeBranch = VCSRefTypeKey.String("branch")
+ // [tag]
+ // Stability: development
+ //
+ // [tag]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddeftagatag
+ VCSRefTypeTag = VCSRefTypeKey.String("tag")
+)
+
+// Enum values for vcs.revision_delta.direction
+var (
+ // How many revisions the change is behind the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionBehind = VCSRevisionDeltaDirectionKey.String("behind")
+ // How many revisions the change is ahead of the target ref.
+ // Stability: development
+ VCSRevisionDeltaDirectionAhead = VCSRevisionDeltaDirectionKey.String("ahead")
+)
+
+// Namespace: webengine
+const (
+ // WebEngineDescriptionKey is the attribute Key conforming to the
+ // "webengine.description" semantic conventions. It represents the additional
+ // description of the web engine (e.g. detailed version and edition
+ // information).
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) -
+ // 2.2.2.Final"
+ WebEngineDescriptionKey = attribute.Key("webengine.description")
+
+ // WebEngineNameKey is the attribute Key conforming to the "webengine.name"
+ // semantic conventions. It represents the name of the web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "WildFly"
+ WebEngineNameKey = attribute.Key("webengine.name")
+
+ // WebEngineVersionKey is the attribute Key conforming to the
+ // "webengine.version" semantic conventions. It represents the version of the
+ // web engine.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "21.0.0"
+ WebEngineVersionKey = attribute.Key("webengine.version")
+)
+
+// WebEngineDescription returns an attribute KeyValue conforming to the
+// "webengine.description" semantic conventions. It represents the additional
+// description of the web engine (e.g. detailed version and edition information).
+func WebEngineDescription(val string) attribute.KeyValue {
+ return WebEngineDescriptionKey.String(val)
+}
+
+// WebEngineName returns an attribute KeyValue conforming to the "webengine.name"
+// semantic conventions. It represents the name of the web engine.
+func WebEngineName(val string) attribute.KeyValue {
+ return WebEngineNameKey.String(val)
+}
+
+// WebEngineVersion returns an attribute KeyValue conforming to the
+// "webengine.version" semantic conventions. It represents the version of the web
+// engine.
+func WebEngineVersion(val string) attribute.KeyValue {
+ return WebEngineVersionKey.String(val)
+}
+
+// Namespace: zos
+const (
+ // ZOSSmfIDKey is the attribute Key conforming to the "zos.smf.id" semantic
+ // conventions. It represents the System Management Facility (SMF) Identifier
+	// that uniquely identifies a z/OS system within a SYSPLEX or mainframe environment
+ // and is used for system and performance analysis.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "SYS1"
+ ZOSSmfIDKey = attribute.Key("zos.smf.id")
+
+ // ZOSSysplexNameKey is the attribute Key conforming to the "zos.sysplex.name"
+ // semantic conventions. It represents the name of the SYSPLEX to which the z/OS
+	// system belongs to.
+ //
+ // Type: string
+ // RequirementLevel: Recommended
+ // Stability: Development
+ //
+ // Examples: "SYSPLEX1"
+ ZOSSysplexNameKey = attribute.Key("zos.sysplex.name")
+)
+
+// ZOSSmfID returns an attribute KeyValue conforming to the "zos.smf.id" semantic
+// conventions. It represents the System Management Facility (SMF) Identifier
+// that uniquely identifies a z/OS system within a SYSPLEX or mainframe environment
+// and is used for system and performance analysis.
+func ZOSSmfID(val string) attribute.KeyValue {
+ return ZOSSmfIDKey.String(val)
+}
+
+// ZOSSysplexName returns an attribute KeyValue conforming to the
+// "zos.sysplex.name" semantic conventions. It represents the name of the SYSPLEX
+// to which the z/OS system belongs to.
+func ZOSSysplexName(val string) attribute.KeyValue {
+ return ZOSSysplexNameKey.String(val)
+}
\ No newline at end of file
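A minimal usage sketch of the generated attribute constructors above, assuming the standard global tracer API (`otel.Tracer`): each constructor returns an `attribute.KeyValue` that can be attached to a span, metric, or resource. The tracer name, span name, and attribute values below are illustrative, not taken from this change.

package example

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func annotateSpan(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "vcs-change")
	defer span.End()

	// Attach VCS semantic-convention attributes; the values are hypothetical.
	span.SetAttributes(
		semconv.VCSRefHeadName("my-feature-branch"),
		semconv.VCSRefHeadTypeBranch, // enum value: vcs.ref.head.type=branch
		semconv.VCSRepositoryName("my-cool-repo"),
	)
}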
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
similarity index 80%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
index d031bbea7..111010321 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/doc.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/doc.go
@@ -4,6 +4,6 @@
// Package semconv implements OpenTelemetry semantic conventions.
//
// OpenTelemetry semantic conventions are agreed standardized naming
-// patterns for OpenTelemetry things. This package represents the v1.26.0
+// patterns for OpenTelemetry things. This package represents the v1.37.0
// version of the OpenTelemetry semantic conventions.
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
new file mode 100644
index 000000000..666bded4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
@@ -0,0 +1,31 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.opentelemetry.io/otel/attribute"
+)
+
+// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+func ErrorType(err error) attribute.KeyValue {
+ if err == nil {
+ return ErrorTypeOther
+ }
+ t := reflect.TypeOf(err)
+ var value string
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ value = t.String()
+ } else {
+ value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+ }
+
+ if value == "" {
+ return ErrorTypeOther
+ }
+ return ErrorTypeKey.String(value)
+}
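A brief, hedged illustration of how the `ErrorType` helper above maps a Go error value to the `error.type` attribute; the error values used here are only examples of its fallback behavior.

package example

import (
	"io"

	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

func errorTypeExamples() {
	// io.EOF has dynamic type *errors.errorString; reflection reports an empty
	// PkgPath and Name for pointer types, so ErrorType falls back to t.String(),
	// yielding error.type="*errors.errorString".
	_ = semconv.ErrorType(io.EOF)

	// A nil error yields the "_OTHER" fallback value.
	_ = semconv.ErrorType(nil)
}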
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
similarity index 74%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
index bfaee0d56..e67469a4f 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/exception.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/exception.go
@@ -1,7 +1,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
const (
// ExceptionEventName is the name of the Span event representing an exception.
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
new file mode 100644
index 000000000..a78eafd1f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
@@ -0,0 +1,2126 @@
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package otelconv provides types and functionality for OpenTelemetry semantic
+// conventions in the "otel" namespace.
+package otelconv
+
+import (
+ "context"
+ "sync"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/metric"
+ "go.opentelemetry.io/otel/metric/noop"
+)
+
+var (
+ addOptPool = &sync.Pool{New: func() any { return &[]metric.AddOption{} }}
+ recOptPool = &sync.Pool{New: func() any { return &[]metric.RecordOption{} }}
+)
+
+// ErrorTypeAttr is an attribute conforming to the error.type semantic
+// conventions. It describes a class of error the operation ended with.
+type ErrorTypeAttr string
+
+var (
+ // ErrorTypeOther is a fallback error value to be used when the instrumentation
+ // doesn't define a custom value.
+ ErrorTypeOther ErrorTypeAttr = "_OTHER"
+)
+
+// ComponentTypeAttr is an attribute conforming to the otel.component.type
+// semantic conventions. It represents a name identifying the type of the
+// OpenTelemetry component.
+type ComponentTypeAttr string
+
+var (
+ // ComponentTypeBatchingSpanProcessor is the builtin SDK batching span
+ // processor.
+ ComponentTypeBatchingSpanProcessor ComponentTypeAttr = "batching_span_processor"
+ // ComponentTypeSimpleSpanProcessor is the builtin SDK simple span processor.
+ ComponentTypeSimpleSpanProcessor ComponentTypeAttr = "simple_span_processor"
+ // ComponentTypeBatchingLogProcessor is the builtin SDK batching log record
+ // processor.
+ ComponentTypeBatchingLogProcessor ComponentTypeAttr = "batching_log_processor"
+ // ComponentTypeSimpleLogProcessor is the builtin SDK simple log record
+ // processor.
+ ComponentTypeSimpleLogProcessor ComponentTypeAttr = "simple_log_processor"
+ // ComponentTypeOtlpGRPCSpanExporter is the OTLP span exporter over gRPC with
+ // protobuf serialization.
+ ComponentTypeOtlpGRPCSpanExporter ComponentTypeAttr = "otlp_grpc_span_exporter"
+ // ComponentTypeOtlpHTTPSpanExporter is the OTLP span exporter over HTTP with
+ // protobuf serialization.
+ ComponentTypeOtlpHTTPSpanExporter ComponentTypeAttr = "otlp_http_span_exporter"
+ // ComponentTypeOtlpHTTPJSONSpanExporter is the OTLP span exporter over HTTP
+ // with JSON serialization.
+ ComponentTypeOtlpHTTPJSONSpanExporter ComponentTypeAttr = "otlp_http_json_span_exporter"
+ // ComponentTypeZipkinHTTPSpanExporter is the zipkin span exporter over HTTP.
+ ComponentTypeZipkinHTTPSpanExporter ComponentTypeAttr = "zipkin_http_span_exporter"
+ // ComponentTypeOtlpGRPCLogExporter is the OTLP log record exporter over gRPC
+ // with protobuf serialization.
+ ComponentTypeOtlpGRPCLogExporter ComponentTypeAttr = "otlp_grpc_log_exporter"
+ // ComponentTypeOtlpHTTPLogExporter is the OTLP log record exporter over HTTP
+ // with protobuf serialization.
+ ComponentTypeOtlpHTTPLogExporter ComponentTypeAttr = "otlp_http_log_exporter"
+ // ComponentTypeOtlpHTTPJSONLogExporter is the OTLP log record exporter over
+ // HTTP with JSON serialization.
+ ComponentTypeOtlpHTTPJSONLogExporter ComponentTypeAttr = "otlp_http_json_log_exporter"
+ // ComponentTypePeriodicMetricReader is the builtin SDK periodically exporting
+ // metric reader.
+ ComponentTypePeriodicMetricReader ComponentTypeAttr = "periodic_metric_reader"
+ // ComponentTypeOtlpGRPCMetricExporter is the OTLP metric exporter over gRPC
+ // with protobuf serialization.
+ ComponentTypeOtlpGRPCMetricExporter ComponentTypeAttr = "otlp_grpc_metric_exporter"
+ // ComponentTypeOtlpHTTPMetricExporter is the OTLP metric exporter over HTTP
+ // with protobuf serialization.
+ ComponentTypeOtlpHTTPMetricExporter ComponentTypeAttr = "otlp_http_metric_exporter"
+ // ComponentTypeOtlpHTTPJSONMetricExporter is the OTLP metric exporter over HTTP
+ // with JSON serialization.
+ ComponentTypeOtlpHTTPJSONMetricExporter ComponentTypeAttr = "otlp_http_json_metric_exporter"
+ // ComponentTypePrometheusHTTPTextMetricExporter is the prometheus metric
+ // exporter over HTTP with the default text-based format.
+ ComponentTypePrometheusHTTPTextMetricExporter ComponentTypeAttr = "prometheus_http_text_metric_exporter"
+)
+
+// SpanParentOriginAttr is an attribute conforming to the otel.span.parent.origin
+// semantic conventions. It represents whether the span has a parent span, and
+// if so, [whether it is a remote parent].
+//
+// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+type SpanParentOriginAttr string
+
+var (
+ // SpanParentOriginNone is the span does not have a parent, it is a root span.
+ SpanParentOriginNone SpanParentOriginAttr = "none"
+ // SpanParentOriginLocal is the span has a parent and the parent's span context
+ // [isRemote()] is false.
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ SpanParentOriginLocal SpanParentOriginAttr = "local"
+ // SpanParentOriginRemote is the span has a parent and the parent's span context
+ // [isRemote()] is true.
+ //
+ // [isRemote()]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+ SpanParentOriginRemote SpanParentOriginAttr = "remote"
+)
+
+// SpanSamplingResultAttr is an attribute conforming to the
+// otel.span.sampling_result semantic conventions. It represents the result value
+// of the sampler for this span.
+type SpanSamplingResultAttr string
+
+var (
+ // SpanSamplingResultDrop is the span is not sampled and not recording.
+ SpanSamplingResultDrop SpanSamplingResultAttr = "DROP"
+ // SpanSamplingResultRecordOnly is the span is not sampled, but recording.
+ SpanSamplingResultRecordOnly SpanSamplingResultAttr = "RECORD_ONLY"
+ // SpanSamplingResultRecordAndSample is the span is sampled and recording.
+ SpanSamplingResultRecordAndSample SpanSamplingResultAttr = "RECORD_AND_SAMPLE"
+)
+
+// RPCGRPCStatusCodeAttr is an attribute conforming to the rpc.grpc.status_code
+// semantic conventions. It represents the gRPC status code of the last gRPC
+// request performed in scope of this export call.
+type RPCGRPCStatusCodeAttr int64
+
+var (
+ // RPCGRPCStatusCodeOk is the OK.
+ RPCGRPCStatusCodeOk RPCGRPCStatusCodeAttr = 0
+ // RPCGRPCStatusCodeCancelled is the CANCELLED.
+ RPCGRPCStatusCodeCancelled RPCGRPCStatusCodeAttr = 1
+ // RPCGRPCStatusCodeUnknown is the UNKNOWN.
+ RPCGRPCStatusCodeUnknown RPCGRPCStatusCodeAttr = 2
+ // RPCGRPCStatusCodeInvalidArgument is the INVALID_ARGUMENT.
+ RPCGRPCStatusCodeInvalidArgument RPCGRPCStatusCodeAttr = 3
+ // RPCGRPCStatusCodeDeadlineExceeded is the DEADLINE_EXCEEDED.
+ RPCGRPCStatusCodeDeadlineExceeded RPCGRPCStatusCodeAttr = 4
+ // RPCGRPCStatusCodeNotFound is the NOT_FOUND.
+ RPCGRPCStatusCodeNotFound RPCGRPCStatusCodeAttr = 5
+ // RPCGRPCStatusCodeAlreadyExists is the ALREADY_EXISTS.
+ RPCGRPCStatusCodeAlreadyExists RPCGRPCStatusCodeAttr = 6
+ // RPCGRPCStatusCodePermissionDenied is the PERMISSION_DENIED.
+ RPCGRPCStatusCodePermissionDenied RPCGRPCStatusCodeAttr = 7
+ // RPCGRPCStatusCodeResourceExhausted is the RESOURCE_EXHAUSTED.
+ RPCGRPCStatusCodeResourceExhausted RPCGRPCStatusCodeAttr = 8
+ // RPCGRPCStatusCodeFailedPrecondition is the FAILED_PRECONDITION.
+ RPCGRPCStatusCodeFailedPrecondition RPCGRPCStatusCodeAttr = 9
+ // RPCGRPCStatusCodeAborted is the ABORTED.
+ RPCGRPCStatusCodeAborted RPCGRPCStatusCodeAttr = 10
+ // RPCGRPCStatusCodeOutOfRange is the OUT_OF_RANGE.
+ RPCGRPCStatusCodeOutOfRange RPCGRPCStatusCodeAttr = 11
+ // RPCGRPCStatusCodeUnimplemented is the UNIMPLEMENTED.
+ RPCGRPCStatusCodeUnimplemented RPCGRPCStatusCodeAttr = 12
+ // RPCGRPCStatusCodeInternal is the INTERNAL.
+ RPCGRPCStatusCodeInternal RPCGRPCStatusCodeAttr = 13
+ // RPCGRPCStatusCodeUnavailable is the UNAVAILABLE.
+ RPCGRPCStatusCodeUnavailable RPCGRPCStatusCodeAttr = 14
+ // RPCGRPCStatusCodeDataLoss is the DATA_LOSS.
+ RPCGRPCStatusCodeDataLoss RPCGRPCStatusCodeAttr = 15
+ // RPCGRPCStatusCodeUnauthenticated is the UNAUTHENTICATED.
+ RPCGRPCStatusCodeUnauthenticated RPCGRPCStatusCodeAttr = 16
+)
+
+// SDKExporterLogExported is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.log.exported" semantic conventions. It
+// represents the number of log records for which the export has finished, either
+// successful or failed.
+type SDKExporterLogExported struct {
+ metric.Int64Counter
+}
+
+// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument.
+func NewSDKExporterLogExported(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKExporterLogExported, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterLogExported{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.exporter.log.exported",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterLogExported{noop.Int64Counter{}}, err
+ }
+ return SDKExporterLogExported{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterLogExported) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterLogExported) Name() string {
+ return "otel.sdk.exporter.log.exported"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterLogExported) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterLogExported) Description() string {
+ return "The number of log records for which the export has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_log_records`), rejected log records MUST count as failed and only
+// non-rejected log records count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterLogExported) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_log_records`), rejected log records MUST count as failed and only
+// non-rejected log records count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterLogExported) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (SDKExporterLogExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterLogExported) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterLogExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterLogExported) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterLogExported) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
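// A minimal sketch of how the SDKExporterLogExported wrapper above might be
// used, assuming a Meter obtained from the global otel.Meter API. The meter
// name, component name, and counts are illustrative assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func countExportedLogs(ctx context.Context) error {
	exported, err := otelconv.NewSDKExporterLogExported(otel.Meter("example-sdk"))
	if err != nil {
		return err
	}

	// 10 records exported successfully: error.type is deliberately omitted.
	exported.Add(ctx, 10,
		exported.AttrComponentType(otelconv.ComponentTypeOtlpGRPCLogExporter),
		exported.AttrComponentName("otlp_grpc_log_exporter/0"),
	)

	// 2 records rejected by the backend: counted as failed, with the generic
	// "rejected" error type since no more specific cause is available.
	exported.Add(ctx, 2,
		exported.AttrErrorType(otelconv.ErrorTypeAttr("rejected")),
		exported.AttrComponentType(otelconv.ComponentTypeOtlpGRPCLogExporter),
		exported.AttrComponentName("otlp_grpc_log_exporter/0"),
	)
	return nil
}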
+
+// SDKExporterLogInflight is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.log.inflight" semantic conventions. It
+// represents the number of log records which were passed to the exporter, but
+// that have not been exported yet (neither successful, nor failed).
+type SDKExporterLogInflight struct {
+ metric.Int64UpDownCounter
+}
+
+// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument.
+func NewSDKExporterLogInflight(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (SDKExporterLogInflight, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "otel.sdk.exporter.log.inflight",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
+ }
+ return SDKExporterLogInflight{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterLogInflight) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterLogInflight) Name() string {
+ return "otel.sdk.exporter.log.inflight"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterLogInflight) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterLogInflight) Description() string {
+ return "The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterLogInflight) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterLogInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterLogInflight) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterLogInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterLogInflight) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterLogInflight) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
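// A hedged sketch of the up/down pattern the inflight instrument above implies:
// add the batch size before handing records to the exporter and subtract it once
// the export finishes. The exporter callback, component attributes, and batch
// size are illustrative assumptions.
package example

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func trackInflightLogs(ctx context.Context, export func(context.Context) error) error {
	inflight, err := otelconv.NewSDKExporterLogInflight(otel.Meter("example-sdk"))
	if err != nil {
		return err
	}

	attrs := []attribute.KeyValue{
		inflight.AttrComponentType(otelconv.ComponentTypeOtlpHTTPLogExporter),
		inflight.AttrComponentName("otlp_http_log_exporter/0"),
	}

	const batchSize = 512
	inflight.Add(ctx, batchSize, attrs...)  // records handed to the exporter
	err = export(ctx)                       // perform the actual export
	inflight.Add(ctx, -batchSize, attrs...) // records no longer in flight
	return err
}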
+
+// SDKExporterMetricDataPointExported is an instrument used to record metric
+// values conforming to the "otel.sdk.exporter.metric_data_point.exported"
+// semantic conventions. It represents the number of metric data points for which
+// the export has finished, either successful or failed.
+type SDKExporterMetricDataPointExported struct {
+ metric.Int64Counter
+}
+
+// NewSDKExporterMetricDataPointExported returns a new
+// SDKExporterMetricDataPointExported instrument.
+func NewSDKExporterMetricDataPointExported(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKExporterMetricDataPointExported, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.exporter.metric_data_point.exported",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
+ metric.WithUnit("{data_point}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
+ }
+ return SDKExporterMetricDataPointExported{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterMetricDataPointExported) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterMetricDataPointExported) Name() string {
+ return "otel.sdk.exporter.metric_data_point.exported"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterMetricDataPointExported) Unit() string {
+ return "{data_point}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterMetricDataPointExported) Description() string {
+ return "The number of metric data points for which the export has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_data_points`), rejected data points MUST count as failed and only
+// non-rejected data points count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterMetricDataPointExported) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_data_points`), rejected data points MUST count as failed and only
+// non-rejected data points count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as value for
+// `error.type`.
+func (m SDKExporterMetricDataPointExported) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (SDKExporterMetricDataPointExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterMetricDataPointExported) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterMetricDataPointExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterMetricDataPointExported) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterMetricDataPointExported) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKExporterMetricDataPointInflight is an instrument used to record metric
+// values conforming to the "otel.sdk.exporter.metric_data_point.inflight"
+// semantic conventions. It represents the number of metric data points which
+// were passed to the exporter, but that have not been exported yet (neither
+// successful, nor failed).
+type SDKExporterMetricDataPointInflight struct {
+ metric.Int64UpDownCounter
+}
+
+// NewSDKExporterMetricDataPointInflight returns a new
+// SDKExporterMetricDataPointInflight instrument.
+func NewSDKExporterMetricDataPointInflight(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (SDKExporterMetricDataPointInflight, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "otel.sdk.exporter.metric_data_point.inflight",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{data_point}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
+ }
+ return SDKExporterMetricDataPointInflight{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterMetricDataPointInflight) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterMetricDataPointInflight) Name() string {
+ return "otel.sdk.exporter.metric_data_point.inflight"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterMetricDataPointInflight) Unit() string {
+ return "{data_point}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterMetricDataPointInflight) Description() string {
+ return "The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterMetricDataPointInflight) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterMetricDataPointInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterMetricDataPointInflight) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterMetricDataPointInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterMetricDataPointInflight) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterMetricDataPointInflight) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKExporterOperationDuration is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.operation.duration" semantic conventions.
+// It represents the duration of exporting a batch of telemetry records.
+type SDKExporterOperationDuration struct {
+ metric.Float64Histogram
+}
+
+// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration
+// instrument.
+func NewSDKExporterOperationDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (SDKExporterOperationDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "otel.sdk.exporter.operation.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("The duration of exporting a batch of telemetry records."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
+ }
+ return SDKExporterOperationDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterOperationDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterOperationDuration) Name() string {
+ return "otel.sdk.exporter.operation.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterOperationDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterOperationDuration) Description() string {
+ return "The duration of exporting a batch of telemetry records."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// This metric defines successful operations using the full success definitions
+// for [http]
+// and [grpc]. Anything else is defined as an unsuccessful operation. For
+// successful
+// operations, `error.type` MUST NOT be set. For unsuccessful export operations,
+// `error.type` MUST contain a relevant failure cause.
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) Record(
+ ctx context.Context,
+ val float64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// This metric defines successful operations using the full success definitions
+// for [http]
+// and [grpc]. Anything else is defined as an unsuccessful operation. For
+// successful
+// operations, `error.type` MUST NOT be set. For unsuccessful export operations,
+// `error.type` MUST contain a relevant failure cause.
+//
+// [http]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success-1
+// [grpc]: https://github.com/open-telemetry/opentelemetry-proto/blob/v1.5.0/docs/specification.md#full-success
+func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It describes a class of error the operation ended with.
+func (SDKExporterOperationDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrHTTPResponseStatusCode returns an optional attribute for the
+// "http.response.status_code" semantic convention. It represents the HTTP status
+// code of the last HTTP request performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrHTTPResponseStatusCode(val int) attribute.KeyValue {
+ return attribute.Int("http.response.status_code", val)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterOperationDuration) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterOperationDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrRPCGRPCStatusCode returns an optional attribute for the
+// "rpc.grpc.status_code" semantic convention. It represents the gRPC status code
+// of the last gRPC request performed in scope of this export call.
+func (SDKExporterOperationDuration) AttrRPCGRPCStatusCode(val RPCGRPCStatusCodeAttr) attribute.KeyValue {
+ return attribute.Int64("rpc.grpc.status_code", int64(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterOperationDuration) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterOperationDuration) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
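+// Example (illustrative only, not part of the generated API): a hypothetical
+// exporter could time an export call with this instrument, assuming a
+// metric.Meter named "meter" is in scope. The component type and server
+// address below are placeholder values.
+//
+//	dur, err := NewSDKExporterOperationDuration(meter)
+//	if err != nil {
+//		// handle instrument creation failure
+//	}
+//	start := time.Now()
+//	// ... perform the export call ...
+//	dur.Record(ctx, time.Since(start).Seconds(),
+//		dur.AttrComponentType(ComponentTypeAttr("otlp_grpc_span_exporter")),
+//		dur.AttrServerAddress("collector.example.com"),
+//		dur.AttrServerPort(4317),
+//	)
+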
+// SDKExporterSpanExported is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.span.exported" semantic conventions. It
+// represents the number of spans for which the export has finished, either
+// successful or failed.
+type SDKExporterSpanExported struct {
+ metric.Int64Counter
+}
+
+// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument.
+func NewSDKExporterSpanExported(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKExporterSpanExported, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterSpanExported{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.exporter.span.exported",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterSpanExported{noop.Int64Counter{}}, err
+ }
+ return SDKExporterSpanExported{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterSpanExported) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterSpanExported) Name() string {
+ return "otel.sdk.exporter.span.exported"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterSpanExported) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterSpanExported) Description() string {
+ return "The number of spans for which the export has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_spans`), rejected spans MUST count as failed and only non-rejected
+// spans count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as the value
+// for `error.type`.
+func (m SDKExporterSpanExported) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+// For exporters with partial success semantics (e.g. OTLP with
+// `rejected_spans`), rejected spans MUST count as failed and only non-rejected
+// spans count as success.
+// If no rejection reason is available, `rejected` SHOULD be used as the value
+// for `error.type`.
+func (m SDKExporterSpanExported) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (SDKExporterSpanExported) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterSpanExported) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterSpanExported) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterSpanExported) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterSpanExported) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
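+// Example (illustrative only): when the same attributes are reused on every
+// call, a pre-built attribute.Set avoids reallocating options. The component
+// name below is a placeholder; "rejected" follows the guidance above for
+// rejections without a specific reason.
+//
+//	exported, _ := NewSDKExporterSpanExported(meter)
+//	okSet := attribute.NewSet(
+//		exported.AttrComponentName("otlp_grpc_span_exporter/0"),
+//		exported.AttrServerAddress("collector.example.com"),
+//	)
+//	exported.AddSet(ctx, 40, okSet) // 40 spans exported successfully
+//	exported.Add(ctx, 2,            // 2 spans rejected by the backend
+//		exported.AttrComponentName("otlp_grpc_span_exporter/0"),
+//		exported.AttrErrorType(ErrorTypeAttr("rejected")),
+//	)
+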
+// SDKExporterSpanInflight is an instrument used to record metric values
+// conforming to the "otel.sdk.exporter.span.inflight" semantic conventions. It
+// represents the number of spans which were passed to the exporter, but that
+// have not been exported yet (neither successful, nor failed).
+type SDKExporterSpanInflight struct {
+ metric.Int64UpDownCounter
+}
+
+// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument.
+func NewSDKExporterSpanInflight(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (SDKExporterSpanInflight, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "otel.sdk.exporter.span.inflight",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
+ }
+ return SDKExporterSpanInflight{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKExporterSpanInflight) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKExporterSpanInflight) Name() string {
+ return "otel.sdk.exporter.span.inflight"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKExporterSpanInflight) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKExporterSpanInflight) Description() string {
+ return "The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterSpanInflight) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful exports, `error.type` MUST NOT be set. For failed exports,
+// `error.type` MUST contain the failure cause.
+func (m SDKExporterSpanInflight) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKExporterSpanInflight) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKExporterSpanInflight) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// AttrServerAddress returns an optional attribute for the "server.address"
+// semantic convention. It represents the server domain name if available without
+// reverse DNS lookup; otherwise, IP address or Unix domain socket name.
+func (SDKExporterSpanInflight) AttrServerAddress(val string) attribute.KeyValue {
+ return attribute.String("server.address", val)
+}
+
+// AttrServerPort returns an optional attribute for the "server.port" semantic
+// convention. It represents the server port number.
+func (SDKExporterSpanInflight) AttrServerPort(val int) attribute.KeyValue {
+ return attribute.Int("server.port", val)
+}
+
+// SDKLogCreated is an instrument used to record metric values conforming to the
+// "otel.sdk.log.created" semantic conventions. It represents the number of logs
+// submitted to enabled SDK Loggers.
+type SDKLogCreated struct {
+ metric.Int64Counter
+}
+
+// NewSDKLogCreated returns a new SDKLogCreated instrument.
+func NewSDKLogCreated(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKLogCreated, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKLogCreated{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.log.created",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKLogCreated{noop.Int64Counter{}}, err
+ }
+ return SDKLogCreated{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKLogCreated) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKLogCreated) Name() string {
+ return "otel.sdk.log.created"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKLogCreated) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKLogCreated) Description() string {
+ return "The number of logs submitted to enabled SDK Loggers."
+}
+
+// Add adds incr to the existing count for attrs.
+func (m SDKLogCreated) Add(ctx context.Context, incr int64, attrs ...attribute.KeyValue) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributes(attrs...))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m SDKLogCreated) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// SDKMetricReaderCollectionDuration is an instrument used to record metric
+// values conforming to the "otel.sdk.metric_reader.collection.duration" semantic
+// conventions. It represents the duration of the collect operation of the metric
+// reader.
+type SDKMetricReaderCollectionDuration struct {
+ metric.Float64Histogram
+}
+
+// NewSDKMetricReaderCollectionDuration returns a new
+// SDKMetricReaderCollectionDuration instrument.
+func NewSDKMetricReaderCollectionDuration(
+ m metric.Meter,
+ opt ...metric.Float64HistogramOption,
+) (SDKMetricReaderCollectionDuration, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil
+ }
+
+ i, err := m.Float64Histogram(
+ "otel.sdk.metric_reader.collection.duration",
+ append([]metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the collect operation of the metric reader."),
+ metric.WithUnit("s"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
+ }
+ return SDKMetricReaderCollectionDuration{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKMetricReaderCollectionDuration) Inst() metric.Float64Histogram {
+ return m.Float64Histogram
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKMetricReaderCollectionDuration) Name() string {
+ return "otel.sdk.metric_reader.collection.duration"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKMetricReaderCollectionDuration) Unit() string {
+ return "s"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKMetricReaderCollectionDuration) Description() string {
+ return "The duration of the collect operation of the metric reader."
+}
+
+// Record records val to the current distribution for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful collections, `error.type` MUST NOT be set. For failed
+// collections, `error.type` SHOULD contain the failure cause.
+// It can happen that metrics collection is successful for some MetricProducers,
+// while others fail. In that case `error.type` SHOULD be set to any of the
+// failure causes.
+func (m SDKMetricReaderCollectionDuration) Record(
+ ctx context.Context,
+ val float64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Float64Histogram.Record(ctx, val)
+ return
+ }
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// RecordSet records val to the current distribution for set.
+//
+// For successful collections, `error.type` MUST NOT be set. For failed
+// collections, `error.type` SHOULD contain the failure cause.
+// It can happen that metrics collection is successful for some MetricProducers,
+// while others fail. In that case `error.type` SHOULD be set to any of the
+// failure causes.
+func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
+	if set.Len() == 0 {
+		m.Float64Histogram.Record(ctx, val)
+		return
+	}
+
+ o := recOptPool.Get().(*[]metric.RecordOption)
+ defer func() {
+ *o = (*o)[:0]
+ recOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Float64Histogram.Record(ctx, val, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents the class of error the operation ended with.
+func (SDKMetricReaderCollectionDuration) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKMetricReaderCollectionDuration) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKMetricReaderCollectionDuration) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorLogProcessed is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.log.processed" semantic conventions. It
+// represents the number of log records for which the processing has finished,
+// either successful or failed.
+type SDKProcessorLogProcessed struct {
+ metric.Int64Counter
+}
+
+// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument.
+func NewSDKProcessorLogProcessed(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKProcessorLogProcessed, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.processor.log.processed",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
+ }
+ return SDKProcessorLogProcessed{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorLogProcessed) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorLogProcessed) Name() string {
+ return "otel.sdk.processor.log.processed"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorLogProcessed) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorLogProcessed) Description() string {
+ return "The number of log records for which the processing has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful processing, `error.type` MUST NOT be set. For failed
+// processing, `error.type` MUST contain the failure cause.
+// For the SDK Simple and Batching Log Record Processor a log record is
+// considered to be processed already when it has been submitted to the exporter,
+// not when the corresponding export call has finished.
+func (m SDKProcessorLogProcessed) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful processing, `error.type` MUST NOT be set. For failed
+// processing, `error.type` MUST contain the failure cause.
+// For the SDK Simple and Batching Log Record Processor a log record is
+// considered to be processed already when it has been submitted to the exporter,
+// not when the corresponding export call has finished.
+func (m SDKProcessorLogProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents a low-cardinality description of the failure reason.
+// SDK Batching Log Record Processors MUST use `queue_full` for log records
+// dropped due to a full queue.
+func (SDKProcessorLogProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorLogProcessed) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorLogProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
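+// Example (illustrative only): a batching processor might count both records
+// handed to its exporter and records dropped because the queue was full, using
+// the `queue_full` error type described above. The component name is a
+// placeholder.
+//
+//	processed, _ := NewSDKProcessorLogProcessed(meter)
+//	name := processed.AttrComponentName("batching_log_processor/0")
+//	processed.Add(ctx, 100, name) // submitted to the exporter
+//	processed.Add(ctx, 3, name,   // dropped, queue was full
+//		processed.AttrErrorType(ErrorTypeAttr("queue_full")),
+//	)
+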
+// SDKProcessorLogQueueCapacity is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.log.queue.capacity" semantic
+// conventions. It represents the maximum number of log records the queue of a
+// given instance of an SDK Log Record processor can hold.
+type SDKProcessorLogQueueCapacity struct {
+ metric.Int64ObservableUpDownCounter
+}
+
+// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity
+// instrument.
+func NewSDKProcessorLogQueueCapacity(
+ m metric.Meter,
+ opt ...metric.Int64ObservableUpDownCounterOption,
+) (SDKProcessorLogQueueCapacity, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64ObservableUpDownCounter(
+ "otel.sdk.processor.log.queue.capacity",
+ append([]metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ }
+ return SDKProcessorLogQueueCapacity{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorLogQueueCapacity) Inst() metric.Int64ObservableUpDownCounter {
+ return m.Int64ObservableUpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorLogQueueCapacity) Name() string {
+ return "otel.sdk.processor.log.queue.capacity"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorLogQueueCapacity) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorLogQueueCapacity) Description() string {
+ return "The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorLogQueueCapacity) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorLogQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorLogQueueSize is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.log.queue.size" semantic conventions. It
+// represents the number of log records in the queue of a given instance of an
+// SDK log processor.
+type SDKProcessorLogQueueSize struct {
+ metric.Int64ObservableUpDownCounter
+}
+
+// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument.
+func NewSDKProcessorLogQueueSize(
+ m metric.Meter,
+ opt ...metric.Int64ObservableUpDownCounterOption,
+) (SDKProcessorLogQueueSize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64ObservableUpDownCounter(
+ "otel.sdk.processor.log.queue.size",
+ append([]metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
+ metric.WithUnit("{log_record}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ }
+ return SDKProcessorLogQueueSize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorLogQueueSize) Inst() metric.Int64ObservableUpDownCounter {
+ return m.Int64ObservableUpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorLogQueueSize) Name() string {
+ return "otel.sdk.processor.log.queue.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorLogQueueSize) Unit() string {
+ return "{log_record}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorLogQueueSize) Description() string {
+ return "The number of log records in the queue of a given instance of an SDK log processor."
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorLogQueueSize) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorLogQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
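+// Example (illustrative only): the queue capacity and size instruments are
+// asynchronous, so their values are reported from a callback registered with
+// the meter. Here "queue" stands in for a processor's internal queue and is
+// purely hypothetical.
+//
+//	capacity, _ := NewSDKProcessorLogQueueCapacity(meter)
+//	size, _ := NewSDKProcessorLogQueueSize(meter)
+//	name := capacity.AttrComponentName("batching_log_processor/0")
+//	_, err := meter.RegisterCallback(func(ctx context.Context, o metric.Observer) error {
+//		o.ObserveInt64(capacity.Inst(), int64(queue.Cap()), metric.WithAttributes(name))
+//		o.ObserveInt64(size.Inst(), int64(queue.Len()), metric.WithAttributes(name))
+//		return nil
+//	}, capacity.Inst(), size.Inst())
+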
+// SDKProcessorSpanProcessed is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.span.processed" semantic conventions. It
+// represents the number of spans for which the processing has finished, either
+// successful or failed.
+type SDKProcessorSpanProcessed struct {
+ metric.Int64Counter
+}
+
+// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed
+// instrument.
+func NewSDKProcessorSpanProcessed(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKProcessorSpanProcessed, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.processor.span.processed",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
+ }
+ return SDKProcessorSpanProcessed{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorSpanProcessed) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorSpanProcessed) Name() string {
+ return "otel.sdk.processor.span.processed"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorSpanProcessed) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorSpanProcessed) Description() string {
+ return "The number of spans for which the processing has finished, either successful or failed."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// For successful processing, `error.type` MUST NOT be set. For failed
+// processing, `error.type` MUST contain the failure cause.
+// For the SDK Simple and Batching Span Processor a span is considered to be
+// processed already when it has been submitted to the exporter, not when the
+// corresponding export call has finished.
+func (m SDKProcessorSpanProcessed) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// For successful processing, `error.type` MUST NOT be set. For failed
+// processing, `error.type` MUST contain the failure cause.
+// For the SDK Simple and Batching Span Processor a span is considered to be
+// processed already when it has been submitted to the exporter, not when the
+// corresponding export call has finished.
+func (m SDKProcessorSpanProcessed) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrErrorType returns an optional attribute for the "error.type" semantic
+// convention. It represents a low-cardinality description of the failure reason.
+// SDK Batching Span Processors MUST use `queue_full` for spans dropped due to a
+// full queue.
+func (SDKProcessorSpanProcessed) AttrErrorType(val ErrorTypeAttr) attribute.KeyValue {
+ return attribute.String("error.type", string(val))
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorSpanProcessed) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorSpanProcessed) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorSpanQueueCapacity is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.span.queue.capacity" semantic
+// conventions. It represents the maximum number of spans the queue of a given
+// instance of an SDK span processor can hold.
+type SDKProcessorSpanQueueCapacity struct {
+ metric.Int64ObservableUpDownCounter
+}
+
+// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity
+// instrument.
+func NewSDKProcessorSpanQueueCapacity(
+ m metric.Meter,
+ opt ...metric.Int64ObservableUpDownCounterOption,
+) (SDKProcessorSpanQueueCapacity, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64ObservableUpDownCounter(
+ "otel.sdk.processor.span.queue.capacity",
+ append([]metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ }
+ return SDKProcessorSpanQueueCapacity{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorSpanQueueCapacity) Inst() metric.Int64ObservableUpDownCounter {
+ return m.Int64ObservableUpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorSpanQueueCapacity) Name() string {
+ return "otel.sdk.processor.span.queue.capacity"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorSpanQueueCapacity) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorSpanQueueCapacity) Description() string {
+ return "The maximum number of spans the queue of a given instance of an SDK span processor can hold."
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorSpanQueueCapacity) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorSpanQueueCapacity) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKProcessorSpanQueueSize is an instrument used to record metric values
+// conforming to the "otel.sdk.processor.span.queue.size" semantic conventions.
+// It represents the number of spans in the queue of a given instance of an SDK
+// span processor.
+type SDKProcessorSpanQueueSize struct {
+ metric.Int64ObservableUpDownCounter
+}
+
+// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize
+// instrument.
+func NewSDKProcessorSpanQueueSize(
+ m metric.Meter,
+ opt ...metric.Int64ObservableUpDownCounterOption,
+) (SDKProcessorSpanQueueSize, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64ObservableUpDownCounter(
+ "otel.sdk.processor.span.queue.size",
+ append([]metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ }
+ return SDKProcessorSpanQueueSize{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKProcessorSpanQueueSize) Inst() metric.Int64ObservableUpDownCounter {
+ return m.Int64ObservableUpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKProcessorSpanQueueSize) Name() string {
+ return "otel.sdk.processor.span.queue.size"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKProcessorSpanQueueSize) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKProcessorSpanQueueSize) Description() string {
+ return "The number of spans in the queue of a given instance of an SDK span processor."
+}
+
+// AttrComponentName returns an optional attribute for the "otel.component.name"
+// semantic convention. It represents a name uniquely identifying the instance of
+// the OpenTelemetry component within its containing SDK instance.
+func (SDKProcessorSpanQueueSize) AttrComponentName(val string) attribute.KeyValue {
+ return attribute.String("otel.component.name", val)
+}
+
+// AttrComponentType returns an optional attribute for the "otel.component.type"
+// semantic convention. It represents a name identifying the type of the
+// OpenTelemetry component.
+func (SDKProcessorSpanQueueSize) AttrComponentType(val ComponentTypeAttr) attribute.KeyValue {
+ return attribute.String("otel.component.type", string(val))
+}
+
+// SDKSpanLive is an instrument used to record metric values conforming to the
+// "otel.sdk.span.live" semantic conventions. It represents the number of created
+// spans with `recording=true` for which the end operation has not been called
+// yet.
+type SDKSpanLive struct {
+ metric.Int64UpDownCounter
+}
+
+// NewSDKSpanLive returns a new SDKSpanLive instrument.
+func NewSDKSpanLive(
+ m metric.Meter,
+ opt ...metric.Int64UpDownCounterOption,
+) (SDKSpanLive, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKSpanLive{noop.Int64UpDownCounter{}}, nil
+ }
+
+ i, err := m.Int64UpDownCounter(
+ "otel.sdk.span.live",
+ append([]metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKSpanLive{noop.Int64UpDownCounter{}}, err
+ }
+ return SDKSpanLive{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKSpanLive) Inst() metric.Int64UpDownCounter {
+ return m.Int64UpDownCounter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKSpanLive) Name() string {
+ return "otel.sdk.span.live"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKSpanLive) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKSpanLive) Description() string {
+ return "The number of created spans with `recording=true` for which the end operation has not been called yet."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+func (m SDKSpanLive) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+func (m SDKSpanLive) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64UpDownCounter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64UpDownCounter.Add(ctx, incr, *o...)
+}
+
+// AttrSpanSamplingResult returns an optional attribute for the
+// "otel.span.sampling_result" semantic convention. It represents the result
+// value of the sampler for this span.
+func (SDKSpanLive) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
+ return attribute.String("otel.span.sampling_result", string(val))
+}
+
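+// Example (illustrative only): a tracer implementation might increment this
+// up/down counter when a recording span starts and decrement it when the span
+// ends, so the reported value tracks the number of live spans. The sampling
+// result value below is a placeholder.
+//
+//	live, _ := NewSDKSpanLive(meter)
+//	sampled := live.AttrSpanSamplingResult(SpanSamplingResultAttr("RECORD_AND_SAMPLE"))
+//	live.Add(ctx, 1, sampled)  // span started with recording=true
+//	// ... span is active ...
+//	live.Add(ctx, -1, sampled) // span ended
+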
+// SDKSpanStarted is an instrument used to record metric values conforming to the
+// "otel.sdk.span.started" semantic conventions. It represents the number of
+// created spans.
+type SDKSpanStarted struct {
+ metric.Int64Counter
+}
+
+// NewSDKSpanStarted returns a new SDKSpanStarted instrument.
+func NewSDKSpanStarted(
+ m metric.Meter,
+ opt ...metric.Int64CounterOption,
+) (SDKSpanStarted, error) {
+ // Check if the meter is nil.
+ if m == nil {
+ return SDKSpanStarted{noop.Int64Counter{}}, nil
+ }
+
+ i, err := m.Int64Counter(
+ "otel.sdk.span.started",
+ append([]metric.Int64CounterOption{
+ metric.WithDescription("The number of created spans."),
+ metric.WithUnit("{span}"),
+ }, opt...)...,
+ )
+ if err != nil {
+ return SDKSpanStarted{noop.Int64Counter{}}, err
+ }
+ return SDKSpanStarted{i}, nil
+}
+
+// Inst returns the underlying metric instrument.
+func (m SDKSpanStarted) Inst() metric.Int64Counter {
+ return m.Int64Counter
+}
+
+// Name returns the semantic convention name of the instrument.
+func (SDKSpanStarted) Name() string {
+ return "otel.sdk.span.started"
+}
+
+// Unit returns the semantic convention unit of the instrument
+func (SDKSpanStarted) Unit() string {
+ return "{span}"
+}
+
+// Description returns the semantic convention description of the instrument
+func (SDKSpanStarted) Description() string {
+ return "The number of created spans."
+}
+
+// Add adds incr to the existing count for attrs.
+//
+// All additional attrs passed are included in the recorded value.
+//
+// Implementations MUST record this metric for all spans, even for non-recording
+// ones.
+func (m SDKSpanStarted) Add(
+ ctx context.Context,
+ incr int64,
+ attrs ...attribute.KeyValue,
+) {
+ if len(attrs) == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(
+ *o,
+ metric.WithAttributes(
+ attrs...,
+ ),
+ )
+
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AddSet adds incr to the existing count for set.
+//
+// Implementations MUST record this metric for all spans, even for non-recording
+// ones.
+func (m SDKSpanStarted) AddSet(ctx context.Context, incr int64, set attribute.Set) {
+ if set.Len() == 0 {
+ m.Int64Counter.Add(ctx, incr)
+ return
+ }
+
+ o := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *o = (*o)[:0]
+ addOptPool.Put(o)
+ }()
+
+ *o = append(*o, metric.WithAttributeSet(set))
+ m.Int64Counter.Add(ctx, incr, *o...)
+}
+
+// AttrSpanParentOrigin returns an optional attribute for the
+// "otel.span.parent.origin" semantic convention. It represents whether the
+// span has a parent span, and if so, [whether it is a remote parent].
+//
+// [whether it is a remote parent]: https://opentelemetry.io/docs/specs/otel/trace/api/#isremote
+func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.KeyValue {
+ return attribute.String("otel.span.parent.origin", string(val))
+}
+
+// AttrSpanSamplingResult returns an optional attribute for the
+// "otel.span.sampling_result" semantic convention. It represents the result
+// value of the sampler for this span.
+func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
+ return attribute.String("otel.span.sampling_result", string(val))
+}
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
similarity index 71%
rename from vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
rename to vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
index 4c87c7adc..f8a0b7044 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.26.0/schema.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/schema.go
@@ -1,9 +1,9 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-package semconv // import "go.opentelemetry.io/otel/semconv/v1.26.0"
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
// SchemaURL is the schema URL that matches the version of the semantic conventions
// that this package defines. Semconv packages starting from v1.4.0 must declare
// non-empty schema URL in the form https://opentelemetry.io/schemas/
-const SchemaURL = "https://opentelemetry.io/schemas/1.26.0"
+const SchemaURL = "https://opentelemetry.io/schemas/1.37.0"
diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE
index 261eeb9e9..f1aee0f11 100644
--- a/vendor/go.opentelemetry.io/otel/trace/LICENSE
+++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE
@@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
+
+--------------------------------------------------------------------------------
+
+Copyright 2009 The Go Authors.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google LLC nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
\ No newline at end of file
diff --git a/vendor/go.opentelemetry.io/otel/trace/auto.go b/vendor/go.opentelemetry.io/otel/trace/auto.go
index f3aa39813..8763936a8 100644
--- a/vendor/go.opentelemetry.io/otel/trace/auto.go
+++ b/vendor/go.opentelemetry.io/otel/trace/auto.go
@@ -20,7 +20,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
- semconv "go.opentelemetry.io/otel/semconv/v1.34.0"
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace/embedded"
"go.opentelemetry.io/otel/trace/internal/telemetry"
)
@@ -39,7 +39,7 @@ type autoTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = autoTracerProvider{}
-func (p autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
+func (autoTracerProvider) Tracer(name string, opts ...TracerOption) Tracer {
cfg := NewTracerConfig(opts...)
return autoTracer{
name: name,
@@ -81,7 +81,7 @@ func (t autoTracer) Start(ctx context.Context, name string, opts ...SpanStartOpt
// Expected to be implemented in eBPF.
//
//go:noinline
-func (t *autoTracer) start(
+func (*autoTracer) start(
ctx context.Context,
spanPtr *autoSpan,
psc *SpanContext,
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index 9c0b720a4..aea11a2b5 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -73,7 +73,7 @@ func (cfg *SpanConfig) Timestamp() time.Time {
return cfg.timestamp
}
-// StackTrace checks whether stack trace capturing is enabled.
+// StackTrace reports whether stack trace capturing is enabled.
func (cfg *SpanConfig) StackTrace() bool {
return cfg.stackTrace
}
@@ -154,7 +154,7 @@ func (cfg *EventConfig) Timestamp() time.Time {
return cfg.timestamp
}
-// StackTrace checks whether stack trace capturing is enabled.
+// StackTrace reports whether stack trace capturing is enabled.
func (cfg *EventConfig) StackTrace() bool {
return cfg.stackTrace
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/hex.go b/vendor/go.opentelemetry.io/otel/trace/hex.go
new file mode 100644
index 000000000..1cbef1d4b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/hex.go
@@ -0,0 +1,38 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+const (
+ // hexLU is a hex lookup table of the 16 lowercase hex digits.
+ // The character values of the string are indexed at the equivalent
+ // hexadecimal value they represent. This table efficiently encodes byte data
+ // into a string representation of hexadecimal.
+ hexLU = "0123456789abcdef"
+
+ // hexRev is a reverse hex lookup table for lowercase hex digits.
+	// The table efficiently decodes a hexadecimal string into bytes.
+ // Valid hexadecimal characters are indexed at their respective values. All
+ // other invalid ASCII characters are represented with '\xff'.
+ //
+	// The '\xff' character is used as the invalid marker because no valid
+	// character has the upper 4 bits set, so validation across multiple parsed
+	// characters can be done efficiently by checking that these bits stay zero.
+ hexRev = "" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\x0a\x0b\x0c\x0d\x0e\x0f\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff" +
+ "\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff"
+)
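+
+// Worked example of the tables above (illustrative, not part of the package
+// API): decoding the pair "4a" computes (hexRev['4']<<4)|hexRev['a'] =
+// (0x04<<4)|0x0a = 0x4a. For an invalid input such as "4g", hexRev['g'] is
+// 0xff, so OR-ing every looked-up nibble into a running mask and testing
+// mask&0xf0 != 0 after the loop detects the bad character without a branch per
+// character.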
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
index f663547b4..ff0f6eac6 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/attr.go
@@ -52,7 +52,7 @@ func Map(key string, value ...Attr) Attr {
return Attr{key, MapValue(value...)}
}
-// Equal returns if a is equal to b.
+// Equal reports whether a is equal to b.
func (a Attr) Equal(b Attr) bool {
return a.Key == b.Key && a.Value.Equal(b.Value)
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
index 7b1ae3c4e..bea56f2e7 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/id.go
@@ -22,7 +22,7 @@ func (tid TraceID) String() string {
return hex.EncodeToString(tid[:])
}
-// IsEmpty returns false if id contains at least one non-zero byte.
+// IsEmpty reports whether the TraceID contains only zero bytes.
func (tid TraceID) IsEmpty() bool {
return tid == [traceIDSize]byte{}
}
@@ -50,7 +50,7 @@ func (sid SpanID) String() string {
return hex.EncodeToString(sid[:])
}
-// IsEmpty returns true if the span ID contains at least one non-zero byte.
+// IsEmpty reports whether the SpanID contains only zero bytes.
func (sid SpanID) IsEmpty() bool {
return sid == [spanIDSize]byte{}
}
@@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) {
}
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
-func unmarshalJSON(dst []byte, src []byte) error {
+func unmarshalJSON(dst, src []byte) error {
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
src = src[1 : l-1]
}
diff --git a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
index ae9ce102a..cb7927b81 100644
--- a/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
+++ b/vendor/go.opentelemetry.io/otel/trace/internal/telemetry/value.go
@@ -257,10 +257,10 @@ func (v Value) Kind() ValueKind {
}
}
-// Empty returns if v does not hold any value.
+// Empty reports whether v does not hold any value.
func (v Value) Empty() bool { return v.Kind() == ValueKindEmpty }
-// Equal returns if v is equal to w.
+// Equal reports whether v is equal to w.
func (v Value) Equal(w Value) bool {
k1 := v.Kind()
k2 := w.Kind()
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go
index 0f56e4dbb..400fab123 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop.go
@@ -26,7 +26,7 @@ type noopTracerProvider struct{ embedded.TracerProvider }
var _ TracerProvider = noopTracerProvider{}
// Tracer returns noop implementation of Tracer.
-func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
+func (noopTracerProvider) Tracer(string, ...TracerOption) Tracer {
return noopTracer{}
}
@@ -37,7 +37,7 @@ var _ Tracer = noopTracer{}
// Start carries forward a non-recording Span, if one is present in the context, otherwise it
// creates a no-op Span.
-func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) {
+func (noopTracer) Start(ctx context.Context, _ string, _ ...SpanStartOption) (context.Context, Span) {
span := SpanFromContext(ctx)
if _, ok := span.(nonRecordingSpan); !ok {
// span is likely already a noopSpan, but let's be sure
diff --git a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
index 64a4f1b36..689d220df 100644
--- a/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
+++ b/vendor/go.opentelemetry.io/otel/trace/noop/noop.go
@@ -51,7 +51,7 @@ type Tracer struct{ embedded.Tracer }
// If ctx contains a span context, the returned span will also contain that
// span context. If the span context in ctx is for a non-recording span, that
// span instance will be returned directly.
-func (t Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
+func (Tracer) Start(ctx context.Context, _ string, _ ...trace.SpanStartOption) (context.Context, trace.Span) {
span := trace.SpanFromContext(ctx)
// If the parent context contains a non-zero span context, that span
diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go
index d49adf671..ee6f4bcb2 100644
--- a/vendor/go.opentelemetry.io/otel/trace/trace.go
+++ b/vendor/go.opentelemetry.io/otel/trace/trace.go
@@ -4,8 +4,6 @@
package trace // import "go.opentelemetry.io/otel/trace"
import (
- "bytes"
- "encoding/hex"
"encoding/json"
)
@@ -38,21 +36,47 @@ var (
_ json.Marshaler = nilTraceID
)
-// IsValid checks whether the trace TraceID is valid. A valid trace ID does
+// IsValid reports whether the trace TraceID is valid. A valid trace ID does
// not consist of zeros only.
func (t TraceID) IsValid() bool {
- return !bytes.Equal(t[:], nilTraceID[:])
+ return t != nilTraceID
}
// MarshalJSON implements a custom marshal function to encode TraceID
// as a hex string.
func (t TraceID) MarshalJSON() ([]byte, error) {
- return json.Marshal(t.String())
+ b := [32 + 2]byte{0: '"', 33: '"'}
+ h := t.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of a TraceID.
func (t TraceID) String() string {
- return hex.EncodeToString(t[:])
+ h := t.hexBytes()
+ return string(h[:])
+}
+
+// hexBytes returns the lowercase hex encoding of the TraceID as a byte array.
+func (t TraceID) hexBytes() [32]byte {
+ return [32]byte{
+ hexLU[t[0x0]>>4], hexLU[t[0x0]&0xf],
+ hexLU[t[0x1]>>4], hexLU[t[0x1]&0xf],
+ hexLU[t[0x2]>>4], hexLU[t[0x2]&0xf],
+ hexLU[t[0x3]>>4], hexLU[t[0x3]&0xf],
+ hexLU[t[0x4]>>4], hexLU[t[0x4]&0xf],
+ hexLU[t[0x5]>>4], hexLU[t[0x5]&0xf],
+ hexLU[t[0x6]>>4], hexLU[t[0x6]&0xf],
+ hexLU[t[0x7]>>4], hexLU[t[0x7]&0xf],
+ hexLU[t[0x8]>>4], hexLU[t[0x8]&0xf],
+ hexLU[t[0x9]>>4], hexLU[t[0x9]&0xf],
+ hexLU[t[0xa]>>4], hexLU[t[0xa]&0xf],
+ hexLU[t[0xb]>>4], hexLU[t[0xb]&0xf],
+ hexLU[t[0xc]>>4], hexLU[t[0xc]&0xf],
+ hexLU[t[0xd]>>4], hexLU[t[0xd]&0xf],
+ hexLU[t[0xe]>>4], hexLU[t[0xe]&0xf],
+ hexLU[t[0xf]>>4], hexLU[t[0xf]&0xf],
+ }
}
// SpanID is a unique identity of a span in a trace.
@@ -63,21 +87,38 @@ var (
_ json.Marshaler = nilSpanID
)
-// IsValid checks whether the SpanID is valid. A valid SpanID does not consist
+// IsValid reports whether the SpanID is valid. A valid SpanID does not consist
// of zeros only.
func (s SpanID) IsValid() bool {
- return !bytes.Equal(s[:], nilSpanID[:])
+ return s != nilSpanID
}
// MarshalJSON implements a custom marshal function to encode SpanID
// as a hex string.
func (s SpanID) MarshalJSON() ([]byte, error) {
- return json.Marshal(s.String())
+ b := [16 + 2]byte{0: '"', 17: '"'}
+ h := s.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of a SpanID.
func (s SpanID) String() string {
- return hex.EncodeToString(s[:])
+ b := s.hexBytes()
+ return string(b[:])
+}
+
+func (s SpanID) hexBytes() [16]byte {
+ return [16]byte{
+ hexLU[s[0]>>4], hexLU[s[0]&0xf],
+ hexLU[s[1]>>4], hexLU[s[1]&0xf],
+ hexLU[s[2]>>4], hexLU[s[2]&0xf],
+ hexLU[s[3]>>4], hexLU[s[3]&0xf],
+ hexLU[s[4]>>4], hexLU[s[4]&0xf],
+ hexLU[s[5]>>4], hexLU[s[5]&0xf],
+ hexLU[s[6]>>4], hexLU[s[6]&0xf],
+ hexLU[s[7]>>4], hexLU[s[7]&0xf],
+ }
}
// TraceIDFromHex returns a TraceID from a hex string if it is compliant with
@@ -85,65 +126,58 @@ func (s SpanID) String() string {
// https://www.w3.org/TR/trace-context/#trace-id
// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
func TraceIDFromHex(h string) (TraceID, error) {
- t := TraceID{}
if len(h) != 32 {
- return t, errInvalidTraceIDLength
+ return [16]byte{}, errInvalidTraceIDLength
}
-
- if err := decodeHex(h, t[:]); err != nil {
- return t, err
+ var b [16]byte
+ invalidMark := byte(0)
+ for i := 0; i < len(h); i += 4 {
+ b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]]
+ b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]]
+ invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]]
}
-
- if !t.IsValid() {
- return t, errNilTraceID
+ // If the upper 4 bits of any byte are not zero, there was an invalid hex
+ // character since invalid hex characters are 0xff in hexRev.
+ if invalidMark&0xf0 != 0 {
+ return [16]byte{}, errInvalidHexID
+ }
+ // If we didn't set any bits, then h was all zeros.
+ if invalidMark == 0 {
+ return [16]byte{}, errNilTraceID
}
- return t, nil
+ return b, nil
}
// SpanIDFromHex returns a SpanID from a hex string if it is compliant
// with the w3c trace-context specification.
// See more at https://www.w3.org/TR/trace-context/#parent-id
func SpanIDFromHex(h string) (SpanID, error) {
- s := SpanID{}
if len(h) != 16 {
- return s, errInvalidSpanIDLength
- }
-
- if err := decodeHex(h, s[:]); err != nil {
- return s, err
+ return [8]byte{}, errInvalidSpanIDLength
}
-
- if !s.IsValid() {
- return s, errNilSpanID
+ var b [8]byte
+ invalidMark := byte(0)
+ for i := 0; i < len(h); i += 4 {
+ b[i/2] = (hexRev[h[i]] << 4) | hexRev[h[i+1]]
+ b[i/2+1] = (hexRev[h[i+2]] << 4) | hexRev[h[i+3]]
+ invalidMark |= hexRev[h[i]] | hexRev[h[i+1]] | hexRev[h[i+2]] | hexRev[h[i+3]]
}
- return s, nil
-}
-
-func decodeHex(h string, b []byte) error {
- for _, r := range h {
- switch {
- case 'a' <= r && r <= 'f':
- continue
- case '0' <= r && r <= '9':
- continue
- default:
- return errInvalidHexID
- }
+ // If the upper 4 bits of any byte are not zero, there was an invalid hex
+ // character since invalid hex characters are 0xff in hexRev.
+ if invalidMark&0xf0 != 0 {
+ return [8]byte{}, errInvalidHexID
}
-
- decoded, err := hex.DecodeString(h)
- if err != nil {
- return err
+ // If we didn't set any bits, then h was all zeros.
+ if invalidMark == 0 {
+ return [8]byte{}, errNilSpanID
}
-
- copy(b, decoded)
- return nil
+ return b, nil
}
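Both FromHex rewrites replace the character check plus hex.DecodeString with a single pass over a reverse lookup table (hexRev, not shown in this hunk), accumulating lookups into one byte: an invalid character sets a high bit, and a zero accumulator means the input was all zeros. A hedged standalone sketch of that trick, with an assumed hexRev that maps non-hex bytes to 0xff:

package main

import (
	"errors"
	"fmt"
)

// hexRev maps lowercase ASCII hex digits to their value; every other byte
// maps to 0xff, so ORing lookups together flags bad input in the upper nibble.
var hexRev = func() [256]byte {
	var t [256]byte
	for i := range t {
		t[i] = 0xff
	}
	for c := byte('0'); c <= '9'; c++ {
		t[c] = c - '0'
	}
	for c := byte('a'); c <= 'f'; c++ {
		t[c] = c - 'a' + 10
	}
	return t
}()

// decodeID8 decodes a 16-character hex string into 8 bytes, rejecting
// invalid characters and all-zero IDs with a single accumulator.
func decodeID8(h string) ([8]byte, error) {
	var b [8]byte
	if len(h) != 16 {
		return b, errors.New("invalid length")
	}
	mark := byte(0)
	for i := 0; i < len(h); i += 2 {
		hi, lo := hexRev[h[i]], hexRev[h[i+1]]
		b[i/2] = hi<<4 | lo
		mark |= hi | lo
	}
	if mark&0xf0 != 0 {
		return [8]byte{}, errors.New("invalid hex character")
	}
	if mark == 0 {
		return [8]byte{}, errors.New("all-zero ID")
	}
	return b, nil
}

func main() {
	id, err := decodeID8("00f067aa0ba902b7")
	fmt.Println(id, err)
}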
// TraceFlags contains flags that can be set on a SpanContext.
type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
-// IsSampled returns if the sampling bit is set in the TraceFlags.
+// IsSampled reports whether the sampling bit is set in the TraceFlags.
func (tf TraceFlags) IsSampled() bool {
return tf&FlagsSampled == FlagsSampled
}
@@ -160,12 +194,20 @@ func (tf TraceFlags) WithSampled(sampled bool) TraceFlags { // nolint:revive //
// MarshalJSON implements a custom marshal function to encode TraceFlags
// as a hex string.
func (tf TraceFlags) MarshalJSON() ([]byte, error) {
- return json.Marshal(tf.String())
+ b := [2 + 2]byte{0: '"', 3: '"'}
+ h := tf.hexBytes()
+ copy(b[1:], h[:])
+ return b[:], nil
}
// String returns the hex string representation form of TraceFlags.
func (tf TraceFlags) String() string {
- return hex.EncodeToString([]byte{byte(tf)}[:])
+ h := tf.hexBytes()
+ return string(h[:])
+}
+
+func (tf TraceFlags) hexBytes() [2]byte {
+ return [2]byte{hexLU[tf>>4], hexLU[tf&0xf]}
}
// SpanContextConfig contains mutable fields usable for constructing
@@ -201,13 +243,13 @@ type SpanContext struct {
var _ json.Marshaler = SpanContext{}
-// IsValid returns if the SpanContext is valid. A valid span context has a
+// IsValid reports whether the SpanContext is valid. A valid span context has a
// valid TraceID and SpanID.
func (sc SpanContext) IsValid() bool {
return sc.HasTraceID() && sc.HasSpanID()
}
-// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+// IsRemote reports whether the SpanContext represents a remotely-created Span.
func (sc SpanContext) IsRemote() bool {
return sc.remote
}
@@ -228,7 +270,7 @@ func (sc SpanContext) TraceID() TraceID {
return sc.traceID
}
-// HasTraceID checks if the SpanContext has a valid TraceID.
+// HasTraceID reports whether the SpanContext has a valid TraceID.
func (sc SpanContext) HasTraceID() bool {
return sc.traceID.IsValid()
}
@@ -249,7 +291,7 @@ func (sc SpanContext) SpanID() SpanID {
return sc.spanID
}
-// HasSpanID checks if the SpanContext has a valid SpanID.
+// HasSpanID reports whether the SpanContext has a valid SpanID.
func (sc SpanContext) HasSpanID() bool {
return sc.spanID.IsValid()
}
@@ -270,7 +312,7 @@ func (sc SpanContext) TraceFlags() TraceFlags {
return sc.traceFlags
}
-// IsSampled returns if the sampling bit is set in the SpanContext's TraceFlags.
+// IsSampled reports whether the sampling bit is set in the SpanContext's TraceFlags.
func (sc SpanContext) IsSampled() bool {
return sc.traceFlags.IsSampled()
}
@@ -302,7 +344,7 @@ func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
}
}
-// Equal is a predicate that determines whether two SpanContext values are equal.
+// Equal reports whether two SpanContext values are equal.
func (sc SpanContext) Equal(other SpanContext) bool {
return sc.traceID == other.traceID &&
sc.spanID == other.spanID &&
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
index dc5e34cad..073adae2f 100644
--- a/vendor/go.opentelemetry.io/otel/trace/tracestate.go
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -80,7 +80,7 @@ func checkKeyRemain(key string) bool {
//
// param n is the length of the remaining part; it should be 255 for a simple-key or 13 for a system-id.
func checkKeyPart(key string, n int) bool {
- if len(key) == 0 {
+ if key == "" {
return false
}
first := key[0] // key's first char
@@ -102,7 +102,7 @@ func isAlphaNum(c byte) bool {
//
// param n is the length of the remaining part; it should be exactly 240.
func checkKeyTenant(key string, n int) bool {
- if len(key) == 0 {
+ if key == "" {
return false
}
return isAlphaNum(key[0]) && len(key[1:]) <= n && checkKeyRemain(key[1:])
@@ -191,7 +191,7 @@ func ParseTraceState(ts string) (TraceState, error) {
for ts != "" {
var memberStr string
memberStr, ts, _ = strings.Cut(ts, listDelimiters)
- if len(memberStr) == 0 {
+ if memberStr == "" {
continue
}
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index 7afe92b59..bcaa5aa53 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.37.0"
+ return "1.38.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 9d4742a17..07145e254 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
module-sets:
stable-v1:
- version: v1.37.0
+ version: v1.38.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -22,11 +22,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.59.0
+ version: v0.60.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.13.0
+ version: v0.14.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/log/logtest
@@ -36,7 +36,7 @@ module-sets:
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.12
+ version: v0.0.13
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 93bcaab03..9a4bd123c 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -280,6 +280,8 @@ type Framer struct {
// lastHeaderStream is non-zero if the last frame was an
// unfinished HEADERS/CONTINUATION.
lastHeaderStream uint32
+ // lastFrameType holds the type of the last frame for verifying frame order.
+ lastFrameType FrameType
maxReadSize uint32
headerBuf [frameHeaderLen]byte
@@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool {
return err != nil
}
-// ReadFrame reads a single frame. The returned Frame is only valid
-// until the next call to ReadFrame.
+// ReadFrameHeader reads the header of the next frame.
+// It reads the 9-byte fixed frame header, and does not read any portion of the
+// frame payload. The caller is responsible for consuming the payload, either
+// with ReadFrameForHeader or directly from the Framer's io.Reader.
//
-// If the frame is larger than previously set with SetMaxReadFrameSize, the
-// returned error is ErrFrameTooLarge. Other errors may be of type
-// ConnectionError, StreamError, or anything else from the underlying
-// reader.
+// If the frame is larger than previously set with SetMaxReadFrameSize, it
+// returns the frame header and ErrFrameTooLarge.
//
-// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
-// indicates the stream responsible for the error.
-func (fr *Framer) ReadFrame() (Frame, error) {
+// If the returned FrameHeader.StreamID is non-zero, it indicates the stream
+// responsible for the error.
+func (fr *Framer) ReadFrameHeader() (FrameHeader, error) {
fr.errDetail = nil
- if fr.lastFrame != nil {
- fr.lastFrame.invalidate()
- }
fh, err := readFrameHeader(fr.headerBuf[:], fr.r)
if err != nil {
- return nil, err
+ return fh, err
}
if fh.Length > fr.maxReadSize {
if fh == invalidHTTP1LookingFrameHeader() {
- return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
+ return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
}
- return nil, ErrFrameTooLarge
+ return fh, ErrFrameTooLarge
+ }
+ if err := fr.checkFrameOrder(fh); err != nil {
+ return fh, err
+ }
+ return fh, nil
+}
+
+// ReadFrameForHeader reads the payload for the frame with the given FrameHeader.
+//
+// It behaves identically to ReadFrame, other than not checking the maximum
+// frame size.
+func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) {
+ if fr.lastFrame != nil {
+ fr.lastFrame.invalidate()
}
payload := fr.getReadBuf(fh.Length)
if _, err := io.ReadFull(fr.r, payload); err != nil {
@@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
}
return nil, err
}
- if err := fr.checkFrameOrder(f); err != nil {
- return nil, err
- }
+ fr.lastFrame = f
if fr.logReads {
fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f))
}
@@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) {
return f, nil
}
+// ReadFrame reads a single frame. The returned Frame is only valid
+// until the next call to ReadFrame or ReadFrameForHeader.
+//
+// If the frame is larger than previously set with SetMaxReadFrameSize, the
+// returned error is ErrFrameTooLarge. Other errors may be of type
+// ConnectionError, StreamError, or anything else from the underlying
+// reader.
+//
+// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID
+// indicates the stream responsible for the error.
+func (fr *Framer) ReadFrame() (Frame, error) {
+ fh, err := fr.ReadFrameHeader()
+ if err != nil {
+ return nil, err
+ }
+ return fr.ReadFrameForHeader(fh)
+}
+
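ReadFrame is now a thin composition of the two new entry points, so callers that need to inspect the frame type or length before buffering the payload can split the steps themselves. A hedged usage sketch against an already-constructed Framer (readOneFrame and the package name are illustrative, not part of the library):

package h2util

import (
	"log"

	"golang.org/x/net/http2"
)

// readOneFrame reads the fixed 9-byte header first, inspects it, and only
// then consumes the payload for that header.
func readOneFrame(fr *http2.Framer) (http2.Frame, error) {
	fh, err := fr.ReadFrameHeader()
	if err != nil {
		return nil, err // a non-zero fh.StreamID names the stream at fault
	}
	log.Printf("next frame: %v, %d bytes on stream %d", fh.Type, fh.Length, fh.StreamID)
	return fr.ReadFrameForHeader(fh)
}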
// connError returns ConnectionError(code) but first
// stashes away a public reason so the caller can optionally relay it
// to the peer before hanging up on them. This might help others debug
@@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error {
// checkFrameOrder reports an error if f is an invalid frame to return
// next from ReadFrame. Mostly it checks whether HEADERS and
// CONTINUATION frames are contiguous.
-func (fr *Framer) checkFrameOrder(f Frame) error {
- last := fr.lastFrame
- fr.lastFrame = f
+func (fr *Framer) checkFrameOrder(fh FrameHeader) error {
+ lastType := fr.lastFrameType
+ fr.lastFrameType = fh.Type
if fr.AllowIllegalReads {
return nil
}
- fh := f.Header()
if fr.lastHeaderStream != 0 {
if fh.Type != FrameContinuation {
return fr.connError(ErrCodeProtocol,
fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d",
fh.Type, fh.StreamID,
- last.Header().Type, fr.lastHeaderStream))
+ lastType, fr.lastHeaderStream))
}
if fh.StreamID != fr.lastHeaderStream {
return fr.connError(ErrCodeProtocol,
@@ -1161,7 +1189,7 @@ var defaultRFC9218Priority = PriorityParam{
// PriorityParam struct below is a superset of both schemes. The exported
// symbols are from RFC 7540 and the non-exported ones are from RFC 9218.
-// PriorityParam are the stream prioritzation parameters.
+// PriorityParam are the stream prioritization parameters.
type PriorityParam struct {
// StreamDep is a 31-bit stream identifier for the
// stream that this stream depends on. Zero means no
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index be759b606..1965913e5 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -9,6 +9,7 @@ package http2
import (
"bufio"
"bytes"
+ "compress/flate"
"compress/gzip"
"context"
"crypto/rand"
@@ -3076,35 +3077,102 @@ type erringRoundTripper struct{ err error }
func (rt erringRoundTripper) RoundTripErr() error { return rt.err }
func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err }
+var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body")
+
// gzipReader wraps a response body so it can lazily
-// call gzip.NewReader on the first call to Read
+// get gzip.Reader from the pool on the first call to Read.
+// After Close is called it puts gzip.Reader to the pool immediately
+// if there is no Read in progress or later when Read completes.
type gzipReader struct {
_ incomparable
body io.ReadCloser // underlying Response.Body
- zr *gzip.Reader // lazily-initialized gzip reader
- zerr error // sticky error
+ mu sync.Mutex // guards zr and zerr
+ zr *gzip.Reader // stores gzip reader from the pool between reads
+ zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close
}
-func (gz *gzipReader) Read(p []byte) (n int, err error) {
+type eofReader struct{}
+
+func (eofReader) Read([]byte) (int, error) { return 0, io.EOF }
+func (eofReader) ReadByte() (byte, error) { return 0, io.EOF }
+
+var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }}
+
+// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r.
+func gzipPoolGet(r io.Reader) (*gzip.Reader, error) {
+ zr := gzipPool.Get().(*gzip.Reader)
+ if err := zr.Reset(r); err != nil {
+ gzipPoolPut(zr)
+ return nil, err
+ }
+ return zr, nil
+}
+
+// gzipPoolPut puts a gzip.Reader back into the pool.
+func gzipPoolPut(zr *gzip.Reader) {
+ // Reset will allocate bufio.Reader if we pass it anything
+ // other than a flate.Reader, so ensure that it's getting one.
+ var r flate.Reader = eofReader{}
+ zr.Reset(r)
+ gzipPool.Put(zr)
+}
+
+// acquire returns a gzip.Reader for reading response body.
+// The reader must be released after use.
+func (gz *gzipReader) acquire() (*gzip.Reader, error) {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
if gz.zerr != nil {
- return 0, gz.zerr
+ return nil, gz.zerr
}
if gz.zr == nil {
- gz.zr, err = gzip.NewReader(gz.body)
- if err != nil {
- gz.zerr = err
- return 0, err
+ gz.zr, gz.zerr = gzipPoolGet(gz.body)
+ if gz.zerr != nil {
+ return nil, gz.zerr
}
}
- return gz.zr.Read(p)
+ ret := gz.zr
+ gz.zr, gz.zerr = nil, errConcurrentReadOnResBody
+ return ret, nil
}
-func (gz *gzipReader) Close() error {
- if err := gz.body.Close(); err != nil {
- return err
+// release returns the gzip.Reader to the pool if Close was called during Read.
+func (gz *gzipReader) release(zr *gzip.Reader) {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
+ if gz.zerr == errConcurrentReadOnResBody {
+ gz.zr, gz.zerr = zr, nil
+ } else { // fs.ErrClosed
+ gzipPoolPut(zr)
+ }
+}
+
+// close returns the gzip.Reader to the pool immediately or
+// signals release to do so after Read completes.
+func (gz *gzipReader) close() {
+ gz.mu.Lock()
+ defer gz.mu.Unlock()
+ if gz.zerr == nil && gz.zr != nil {
+ gzipPoolPut(gz.zr)
+ gz.zr = nil
}
gz.zerr = fs.ErrClosed
- return nil
+}
+
+func (gz *gzipReader) Read(p []byte) (n int, err error) {
+ zr, err := gz.acquire()
+ if err != nil {
+ return 0, err
+ }
+ defer gz.release(zr)
+
+ return zr.Read(p)
+}
+
+func (gz *gzipReader) Close() error {
+ gz.close()
+
+ return gz.body.Close()
}
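The rewrite keeps the lazy first-Read initialization but sources the gzip.Reader from a sync.Pool and hands it back on Close, with the mutex arbitrating a Close that races an in-flight Read. A simplified, hedged sketch of the underlying pool-and-Reset pattern (it omits the concurrent-close bookkeeping and the eofReader trick that drops the reference to the old body before pooling):

package main

import (
	"compress/gzip"
	"io"
	"sync"
)

var readerPool = sync.Pool{New: func() any { return new(gzip.Reader) }}

// decompress borrows a gzip.Reader from the pool, resets it onto src,
// copies the decompressed bytes to dst, and returns the reader to the pool.
func decompress(dst io.Writer, src io.Reader) error {
	zr := readerPool.Get().(*gzip.Reader)
	defer readerPool.Put(zr)
	if err := zr.Reset(src); err != nil {
		return err
	}
	defer zr.Close()
	_, err := io.Copy(dst, zr)
	return err
}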
type errorReader struct{ err error }
diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go
index 4d3890f99..7de27be52 100644
--- a/vendor/golang.org/x/net/http2/writesched.go
+++ b/vendor/golang.org/x/net/http2/writesched.go
@@ -185,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) {
}
// writeQueue is used by implementations of WriteScheduler.
+//
+// Each writeQueue contains a queue of FrameWriteRequests, meant to store all
+// FrameWriteRequests associated with a given stream. This is implemented as a
+// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done
+// by incrementing currPos of currQueue. Adding an item is done by appending it
+// to the nextQueue. If currQueue is empty when trying to remove an item, we
+// can swap currQueue and nextQueue to remedy the situation.
+// This two-stage queue is analogous to the use of two lists in Okasaki's
+// purely functional queue but without the overhead of reversing the list when
+// swapping stages.
+//
+// writeQueue also contains prev and next; these can be used by implementations
+// of WriteScheduler to construct data structures that represent the order of
+// writing between different streams (e.g. circular linked list).
type writeQueue struct {
- s []FrameWriteRequest
+ currQueue []FrameWriteRequest
+ nextQueue []FrameWriteRequest
+ currPos int
+
prev, next *writeQueue
}
-func (q *writeQueue) empty() bool { return len(q.s) == 0 }
+func (q *writeQueue) empty() bool {
+ return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0
+}
func (q *writeQueue) push(wr FrameWriteRequest) {
- q.s = append(q.s, wr)
+ q.nextQueue = append(q.nextQueue, wr)
}
func (q *writeQueue) shift() FrameWriteRequest {
- if len(q.s) == 0 {
+ if q.empty() {
panic("invalid use of queue")
}
- wr := q.s[0]
- // TODO: less copy-happy queue.
- copy(q.s, q.s[1:])
- q.s[len(q.s)-1] = FrameWriteRequest{}
- q.s = q.s[:len(q.s)-1]
+ if q.currPos >= len(q.currQueue) {
+ q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0]
+ }
+ wr := q.currQueue[q.currPos]
+ q.currQueue[q.currPos] = FrameWriteRequest{}
+ q.currPos++
return wr
}
+func (q *writeQueue) peek() *FrameWriteRequest {
+ if q.currPos < len(q.currQueue) {
+ return &q.currQueue[q.currPos]
+ }
+ if len(q.nextQueue) > 0 {
+ return &q.nextQueue[0]
+ }
+ return nil
+}
+
// consume consumes up to n bytes from the frame at the head of the queue. If the frame is
// entirely consumed, it is removed from the queue. If the frame
// is partially consumed, the frame is kept with the consumed
// bytes removed. Returns true iff any bytes were consumed.
func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) {
- if len(q.s) == 0 {
+ if q.empty() {
return FrameWriteRequest{}, false
}
- consumed, rest, numresult := q.s[0].Consume(n)
+ consumed, rest, numresult := q.peek().Consume(n)
switch numresult {
case 0:
return FrameWriteRequest{}, false
case 1:
q.shift()
case 2:
- q.s[0] = rest
+ *q.peek() = rest
}
return consumed, true
}
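A hedged, generic sketch of the two-stage queue described in the comment above: shift advances a cursor in the current slice, push appends to the next slice, and the two are swapped only when the current one drains, so both backing arrays are reused instead of shifting elements on every pop:

package main

import "fmt"

// twoStageQueue mirrors the currQueue/nextQueue idea with a generic element type.
type twoStageQueue[T any] struct {
	curr, next []T
	pos        int
}

func (q *twoStageQueue[T]) empty() bool { return len(q.curr)-q.pos+len(q.next) == 0 }

func (q *twoStageQueue[T]) push(v T) { q.next = append(q.next, v) }

func (q *twoStageQueue[T]) shift() T {
	if q.pos >= len(q.curr) {
		// Swap stages, reusing the drained slice's capacity for future pushes.
		q.curr, q.pos, q.next = q.next, 0, q.curr[:0]
	}
	v := q.curr[q.pos]
	var zero T
	q.curr[q.pos] = zero // release the element for GC, as the real queue does
	q.pos++
	return v
}

func main() {
	var q twoStageQueue[int]
	q.push(1)
	q.push(2)
	fmt.Println(q.shift(), q.shift(), q.empty()) // 1 2 true
}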
@@ -232,10 +262,15 @@ type writeQueuePool []*writeQueue
// put inserts an unused writeQueue into the pool.
func (p *writeQueuePool) put(q *writeQueue) {
- for i := range q.s {
- q.s[i] = FrameWriteRequest{}
+ for i := range q.currQueue {
+ q.currQueue[i] = FrameWriteRequest{}
+ }
+ for i := range q.nextQueue {
+ q.nextQueue[i] = FrameWriteRequest{}
}
- q.s = q.s[:0]
+ q.currQueue = q.currQueue[:0]
+ q.nextQueue = q.nextQueue[:0]
+ q.currPos = 0
*p = append(*p, q)
}
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
index 6d24d6a1b..4e33c29a2 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go
@@ -214,8 +214,8 @@ func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i
func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool {
// Prefer the subtree that has sent fewer bytes relative to its weight.
// See sections 5.3.2 and 5.3.4.
- wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes)
- wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes)
+ wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes)
+ wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes)
if bi == 0 && bk == 0 {
return wi >= wk
}
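The one-character move matters because the weight field is a small unsigned integer (a uint8 elsewhere in this package, as assumed here), so weight+1 wraps to 0 when weight is 255 and skews the comparison; converting before adding keeps the intended value. A tiny illustration:

package main

import "fmt"

func main() {
	var w uint8 = 255
	fmt.Println(float64(w+1), float64(w)+1) // 0 256 — the addition wraps before the conversion
}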
@@ -302,7 +302,6 @@ func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) {
q := n.q
ws.queuePool.put(&q)
- n.q.s = nil
if ws.maxClosedNodesInTree > 0 {
ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n)
} else {
diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
similarity index 99%
rename from vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
rename to vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
index 9b5b8808e..cb4cadc32 100644
--- a/vendor/golang.org/x/net/http2/writesched_priority_rfc9128.go
+++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go
@@ -39,7 +39,7 @@ type priorityWriteSchedulerRFC9218 struct {
prioritizeIncremental bool
}
-func newPriorityWriteSchedulerRFC9128() WriteScheduler {
+func newPriorityWriteSchedulerRFC9218() WriteScheduler {
ws := &priorityWriteSchedulerRFC9218{
streams: make(map[uint32]streamMetadata),
}
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index 1d8cffae8..2f45dbc86 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -3,7 +3,7 @@
// license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context
-// cancelation for groups of goroutines working on subtasks of a common task.
+// cancellation for groups of goroutines working on subtasks of a common task.
//
// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
// returning errors.
diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh
index d1c8b2640..42517077c 100644
--- a/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -226,6 +226,7 @@ struct ltchars {
#include
#include
#include
+#include
#include
#include
#include
@@ -529,6 +530,7 @@ ccflags="$@"
$2 ~ /^O[CNPFPL][A-Z]+[^_][A-Z]+$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)DLY$/ ||
$2 ~ /^(NL|CR|TAB|BS|VT|FF)[0-9]$/ ||
+ $2 ~ /^(DT|EI|ELF|EV|NN|NT|PF|SHF|SHN|SHT|STB|STT|VER)_/ ||
$2 ~ /^O?XTABS$/ ||
$2 ~ /^TC[IO](ON|OFF)$/ ||
$2 ~ /^IN_/ ||
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 9439af961..06c0eea6f 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -2643,3 +2643,9 @@ func SchedGetAttr(pid int, flags uint) (*SchedAttr, error) {
//sys Cachestat(fd uint, crange *CachestatRange, cstat *Cachestat_t, flags uint) (err error)
//sys Mseal(b []byte, flags uint) (err error)
+
+//sys setMemPolicy(mode int, mask *CPUSet, size int) (err error) = SYS_SET_MEMPOLICY
+
+func SetMemPolicy(mode int, mask *CPUSet) error {
+ return setMemPolicy(mode, mask, _CPU_SETSIZE)
+}
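A hedged usage sketch of the new Linux-only wrapper: bind future allocations of the calling thread to NUMA node 0. The wrapper reuses CPUSet as the node bitmask, and MPOL_BIND is among the constants added to ztypes_linux.go below.

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var nodes unix.CPUSet
	nodes.Zero()
	nodes.Set(0) // NUMA node 0
	if err := unix.SetMemPolicy(unix.MPOL_BIND, &nodes); err != nil {
		log.Fatalf("set_mempolicy: %v", err)
	}
}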
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index b6db27d93..d0a75da57 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -853,20 +853,86 @@ const (
DM_VERSION_MAJOR = 0x4
DM_VERSION_MINOR = 0x32
DM_VERSION_PATCHLEVEL = 0x0
+ DT_ADDRRNGHI = 0x6ffffeff
+ DT_ADDRRNGLO = 0x6ffffe00
DT_BLK = 0x6
DT_CHR = 0x2
+ DT_DEBUG = 0x15
DT_DIR = 0x4
+ DT_ENCODING = 0x20
DT_FIFO = 0x1
+ DT_FINI = 0xd
+ DT_FLAGS_1 = 0x6ffffffb
+ DT_GNU_HASH = 0x6ffffef5
+ DT_HASH = 0x4
+ DT_HIOS = 0x6ffff000
+ DT_HIPROC = 0x7fffffff
+ DT_INIT = 0xc
+ DT_JMPREL = 0x17
DT_LNK = 0xa
+ DT_LOOS = 0x6000000d
+ DT_LOPROC = 0x70000000
+ DT_NEEDED = 0x1
+ DT_NULL = 0x0
+ DT_PLTGOT = 0x3
+ DT_PLTREL = 0x14
+ DT_PLTRELSZ = 0x2
DT_REG = 0x8
+ DT_REL = 0x11
+ DT_RELA = 0x7
+ DT_RELACOUNT = 0x6ffffff9
+ DT_RELAENT = 0x9
+ DT_RELASZ = 0x8
+ DT_RELCOUNT = 0x6ffffffa
+ DT_RELENT = 0x13
+ DT_RELSZ = 0x12
+ DT_RPATH = 0xf
DT_SOCK = 0xc
+ DT_SONAME = 0xe
+ DT_STRSZ = 0xa
+ DT_STRTAB = 0x5
+ DT_SYMBOLIC = 0x10
+ DT_SYMENT = 0xb
+ DT_SYMTAB = 0x6
+ DT_TEXTREL = 0x16
DT_UNKNOWN = 0x0
+ DT_VALRNGHI = 0x6ffffdff
+ DT_VALRNGLO = 0x6ffffd00
+ DT_VERDEF = 0x6ffffffc
+ DT_VERDEFNUM = 0x6ffffffd
+ DT_VERNEED = 0x6ffffffe
+ DT_VERNEEDNUM = 0x6fffffff
+ DT_VERSYM = 0x6ffffff0
DT_WHT = 0xe
ECHO = 0x8
ECRYPTFS_SUPER_MAGIC = 0xf15f
EFD_SEMAPHORE = 0x1
EFIVARFS_MAGIC = 0xde5e81e4
EFS_SUPER_MAGIC = 0x414a53
+ EI_CLASS = 0x4
+ EI_DATA = 0x5
+ EI_MAG0 = 0x0
+ EI_MAG1 = 0x1
+ EI_MAG2 = 0x2
+ EI_MAG3 = 0x3
+ EI_NIDENT = 0x10
+ EI_OSABI = 0x7
+ EI_PAD = 0x8
+ EI_VERSION = 0x6
+ ELFCLASS32 = 0x1
+ ELFCLASS64 = 0x2
+ ELFCLASSNONE = 0x0
+ ELFCLASSNUM = 0x3
+ ELFDATA2LSB = 0x1
+ ELFDATA2MSB = 0x2
+ ELFDATANONE = 0x0
+ ELFMAG = "\177ELF"
+ ELFMAG0 = 0x7f
+ ELFMAG1 = 'E'
+ ELFMAG2 = 'L'
+ ELFMAG3 = 'F'
+ ELFOSABI_LINUX = 0x3
+ ELFOSABI_NONE = 0x0
EM_386 = 0x3
EM_486 = 0x6
EM_68K = 0x4
@@ -1152,14 +1218,24 @@ const (
ETH_P_WCCP = 0x883e
ETH_P_X25 = 0x805
ETH_P_XDSA = 0xf8
+ ET_CORE = 0x4
+ ET_DYN = 0x3
+ ET_EXEC = 0x2
+ ET_HIPROC = 0xffff
+ ET_LOPROC = 0xff00
+ ET_NONE = 0x0
+ ET_REL = 0x1
EV_ABS = 0x3
EV_CNT = 0x20
+ EV_CURRENT = 0x1
EV_FF = 0x15
EV_FF_STATUS = 0x17
EV_KEY = 0x1
EV_LED = 0x11
EV_MAX = 0x1f
EV_MSC = 0x4
+ EV_NONE = 0x0
+ EV_NUM = 0x2
EV_PWR = 0x16
EV_REL = 0x2
EV_REP = 0x14
@@ -2276,7 +2352,167 @@ const (
NLM_F_REPLACE = 0x100
NLM_F_REQUEST = 0x1
NLM_F_ROOT = 0x100
+ NN_386_IOPERM = "LINUX"
+ NN_386_TLS = "LINUX"
+ NN_ARC_V2 = "LINUX"
+ NN_ARM_FPMR = "LINUX"
+ NN_ARM_GCS = "LINUX"
+ NN_ARM_HW_BREAK = "LINUX"
+ NN_ARM_HW_WATCH = "LINUX"
+ NN_ARM_PACA_KEYS = "LINUX"
+ NN_ARM_PACG_KEYS = "LINUX"
+ NN_ARM_PAC_ENABLED_KEYS = "LINUX"
+ NN_ARM_PAC_MASK = "LINUX"
+ NN_ARM_POE = "LINUX"
+ NN_ARM_SSVE = "LINUX"
+ NN_ARM_SVE = "LINUX"
+ NN_ARM_SYSTEM_CALL = "LINUX"
+ NN_ARM_TAGGED_ADDR_CTRL = "LINUX"
+ NN_ARM_TLS = "LINUX"
+ NN_ARM_VFP = "LINUX"
+ NN_ARM_ZA = "LINUX"
+ NN_ARM_ZT = "LINUX"
+ NN_AUXV = "CORE"
+ NN_FILE = "CORE"
+ NN_GNU_PROPERTY_TYPE_0 = "GNU"
+ NN_LOONGARCH_CPUCFG = "LINUX"
+ NN_LOONGARCH_CSR = "LINUX"
+ NN_LOONGARCH_HW_BREAK = "LINUX"
+ NN_LOONGARCH_HW_WATCH = "LINUX"
+ NN_LOONGARCH_LASX = "LINUX"
+ NN_LOONGARCH_LBT = "LINUX"
+ NN_LOONGARCH_LSX = "LINUX"
+ NN_MIPS_DSP = "LINUX"
+ NN_MIPS_FP_MODE = "LINUX"
+ NN_MIPS_MSA = "LINUX"
+ NN_PPC_DEXCR = "LINUX"
+ NN_PPC_DSCR = "LINUX"
+ NN_PPC_EBB = "LINUX"
+ NN_PPC_HASHKEYR = "LINUX"
+ NN_PPC_PKEY = "LINUX"
+ NN_PPC_PMU = "LINUX"
+ NN_PPC_PPR = "LINUX"
+ NN_PPC_SPE = "LINUX"
+ NN_PPC_TAR = "LINUX"
+ NN_PPC_TM_CDSCR = "LINUX"
+ NN_PPC_TM_CFPR = "LINUX"
+ NN_PPC_TM_CGPR = "LINUX"
+ NN_PPC_TM_CPPR = "LINUX"
+ NN_PPC_TM_CTAR = "LINUX"
+ NN_PPC_TM_CVMX = "LINUX"
+ NN_PPC_TM_CVSX = "LINUX"
+ NN_PPC_TM_SPR = "LINUX"
+ NN_PPC_VMX = "LINUX"
+ NN_PPC_VSX = "LINUX"
+ NN_PRFPREG = "CORE"
+ NN_PRPSINFO = "CORE"
+ NN_PRSTATUS = "CORE"
+ NN_PRXFPREG = "LINUX"
+ NN_RISCV_CSR = "LINUX"
+ NN_RISCV_TAGGED_ADDR_CTRL = "LINUX"
+ NN_RISCV_VECTOR = "LINUX"
+ NN_S390_CTRS = "LINUX"
+ NN_S390_GS_BC = "LINUX"
+ NN_S390_GS_CB = "LINUX"
+ NN_S390_HIGH_GPRS = "LINUX"
+ NN_S390_LAST_BREAK = "LINUX"
+ NN_S390_PREFIX = "LINUX"
+ NN_S390_PV_CPU_DATA = "LINUX"
+ NN_S390_RI_CB = "LINUX"
+ NN_S390_SYSTEM_CALL = "LINUX"
+ NN_S390_TDB = "LINUX"
+ NN_S390_TIMER = "LINUX"
+ NN_S390_TODCMP = "LINUX"
+ NN_S390_TODPREG = "LINUX"
+ NN_S390_VXRS_HIGH = "LINUX"
+ NN_S390_VXRS_LOW = "LINUX"
+ NN_SIGINFO = "CORE"
+ NN_TASKSTRUCT = "CORE"
+ NN_VMCOREDD = "LINUX"
+ NN_X86_SHSTK = "LINUX"
+ NN_X86_XSAVE_LAYOUT = "LINUX"
+ NN_X86_XSTATE = "LINUX"
NSFS_MAGIC = 0x6e736673
+ NT_386_IOPERM = 0x201
+ NT_386_TLS = 0x200
+ NT_ARC_V2 = 0x600
+ NT_ARM_FPMR = 0x40e
+ NT_ARM_GCS = 0x410
+ NT_ARM_HW_BREAK = 0x402
+ NT_ARM_HW_WATCH = 0x403
+ NT_ARM_PACA_KEYS = 0x407
+ NT_ARM_PACG_KEYS = 0x408
+ NT_ARM_PAC_ENABLED_KEYS = 0x40a
+ NT_ARM_PAC_MASK = 0x406
+ NT_ARM_POE = 0x40f
+ NT_ARM_SSVE = 0x40b
+ NT_ARM_SVE = 0x405
+ NT_ARM_SYSTEM_CALL = 0x404
+ NT_ARM_TAGGED_ADDR_CTRL = 0x409
+ NT_ARM_TLS = 0x401
+ NT_ARM_VFP = 0x400
+ NT_ARM_ZA = 0x40c
+ NT_ARM_ZT = 0x40d
+ NT_AUXV = 0x6
+ NT_FILE = 0x46494c45
+ NT_GNU_PROPERTY_TYPE_0 = 0x5
+ NT_LOONGARCH_CPUCFG = 0xa00
+ NT_LOONGARCH_CSR = 0xa01
+ NT_LOONGARCH_HW_BREAK = 0xa05
+ NT_LOONGARCH_HW_WATCH = 0xa06
+ NT_LOONGARCH_LASX = 0xa03
+ NT_LOONGARCH_LBT = 0xa04
+ NT_LOONGARCH_LSX = 0xa02
+ NT_MIPS_DSP = 0x800
+ NT_MIPS_FP_MODE = 0x801
+ NT_MIPS_MSA = 0x802
+ NT_PPC_DEXCR = 0x111
+ NT_PPC_DSCR = 0x105
+ NT_PPC_EBB = 0x106
+ NT_PPC_HASHKEYR = 0x112
+ NT_PPC_PKEY = 0x110
+ NT_PPC_PMU = 0x107
+ NT_PPC_PPR = 0x104
+ NT_PPC_SPE = 0x101
+ NT_PPC_TAR = 0x103
+ NT_PPC_TM_CDSCR = 0x10f
+ NT_PPC_TM_CFPR = 0x109
+ NT_PPC_TM_CGPR = 0x108
+ NT_PPC_TM_CPPR = 0x10e
+ NT_PPC_TM_CTAR = 0x10d
+ NT_PPC_TM_CVMX = 0x10a
+ NT_PPC_TM_CVSX = 0x10b
+ NT_PPC_TM_SPR = 0x10c
+ NT_PPC_VMX = 0x100
+ NT_PPC_VSX = 0x102
+ NT_PRFPREG = 0x2
+ NT_PRPSINFO = 0x3
+ NT_PRSTATUS = 0x1
+ NT_PRXFPREG = 0x46e62b7f
+ NT_RISCV_CSR = 0x900
+ NT_RISCV_TAGGED_ADDR_CTRL = 0x902
+ NT_RISCV_VECTOR = 0x901
+ NT_S390_CTRS = 0x304
+ NT_S390_GS_BC = 0x30c
+ NT_S390_GS_CB = 0x30b
+ NT_S390_HIGH_GPRS = 0x300
+ NT_S390_LAST_BREAK = 0x306
+ NT_S390_PREFIX = 0x305
+ NT_S390_PV_CPU_DATA = 0x30e
+ NT_S390_RI_CB = 0x30d
+ NT_S390_SYSTEM_CALL = 0x307
+ NT_S390_TDB = 0x308
+ NT_S390_TIMER = 0x301
+ NT_S390_TODCMP = 0x302
+ NT_S390_TODPREG = 0x303
+ NT_S390_VXRS_HIGH = 0x30a
+ NT_S390_VXRS_LOW = 0x309
+ NT_SIGINFO = 0x53494749
+ NT_TASKSTRUCT = 0x4
+ NT_VMCOREDD = 0x700
+ NT_X86_SHSTK = 0x204
+ NT_X86_XSAVE_LAYOUT = 0x205
+ NT_X86_XSTATE = 0x202
OCFS2_SUPER_MAGIC = 0x7461636f
OCRNL = 0x8
OFDEL = 0x80
@@ -2463,6 +2699,59 @@ const (
PERF_RECORD_MISC_USER = 0x2
PERF_SAMPLE_BRANCH_PLM_ALL = 0x7
PERF_SAMPLE_WEIGHT_TYPE = 0x1004000
+ PF_ALG = 0x26
+ PF_APPLETALK = 0x5
+ PF_ASH = 0x12
+ PF_ATMPVC = 0x8
+ PF_ATMSVC = 0x14
+ PF_AX25 = 0x3
+ PF_BLUETOOTH = 0x1f
+ PF_BRIDGE = 0x7
+ PF_CAIF = 0x25
+ PF_CAN = 0x1d
+ PF_DECnet = 0xc
+ PF_ECONET = 0x13
+ PF_FILE = 0x1
+ PF_IB = 0x1b
+ PF_IEEE802154 = 0x24
+ PF_INET = 0x2
+ PF_INET6 = 0xa
+ PF_IPX = 0x4
+ PF_IRDA = 0x17
+ PF_ISDN = 0x22
+ PF_IUCV = 0x20
+ PF_KCM = 0x29
+ PF_KEY = 0xf
+ PF_LLC = 0x1a
+ PF_LOCAL = 0x1
+ PF_MAX = 0x2e
+ PF_MCTP = 0x2d
+ PF_MPLS = 0x1c
+ PF_NETBEUI = 0xd
+ PF_NETLINK = 0x10
+ PF_NETROM = 0x6
+ PF_NFC = 0x27
+ PF_PACKET = 0x11
+ PF_PHONET = 0x23
+ PF_PPPOX = 0x18
+ PF_QIPCRTR = 0x2a
+ PF_R = 0x4
+ PF_RDS = 0x15
+ PF_ROSE = 0xb
+ PF_ROUTE = 0x10
+ PF_RXRPC = 0x21
+ PF_SECURITY = 0xe
+ PF_SMC = 0x2b
+ PF_SNA = 0x16
+ PF_TIPC = 0x1e
+ PF_UNIX = 0x1
+ PF_UNSPEC = 0x0
+ PF_VSOCK = 0x28
+ PF_W = 0x2
+ PF_WANPIPE = 0x19
+ PF_X = 0x1
+ PF_X25 = 0x9
+ PF_XDP = 0x2c
PID_FS_MAGIC = 0x50494446
PIPEFS_MAGIC = 0x50495045
PPPIOCGNPMODE = 0xc008744c
@@ -2758,6 +3047,23 @@ const (
PTRACE_SYSCALL_INFO_NONE = 0x0
PTRACE_SYSCALL_INFO_SECCOMP = 0x3
PTRACE_TRACEME = 0x0
+ PT_AARCH64_MEMTAG_MTE = 0x70000002
+ PT_DYNAMIC = 0x2
+ PT_GNU_EH_FRAME = 0x6474e550
+ PT_GNU_PROPERTY = 0x6474e553
+ PT_GNU_RELRO = 0x6474e552
+ PT_GNU_STACK = 0x6474e551
+ PT_HIOS = 0x6fffffff
+ PT_HIPROC = 0x7fffffff
+ PT_INTERP = 0x3
+ PT_LOAD = 0x1
+ PT_LOOS = 0x60000000
+ PT_LOPROC = 0x70000000
+ PT_NOTE = 0x4
+ PT_NULL = 0x0
+ PT_PHDR = 0x6
+ PT_SHLIB = 0x5
+ PT_TLS = 0x7
P_ALL = 0x0
P_PGID = 0x2
P_PID = 0x1
@@ -3091,6 +3397,47 @@ const (
SEEK_MAX = 0x4
SEEK_SET = 0x0
SELINUX_MAGIC = 0xf97cff8c
+ SHF_ALLOC = 0x2
+ SHF_EXCLUDE = 0x8000000
+ SHF_EXECINSTR = 0x4
+ SHF_GROUP = 0x200
+ SHF_INFO_LINK = 0x40
+ SHF_LINK_ORDER = 0x80
+ SHF_MASKOS = 0xff00000
+ SHF_MASKPROC = 0xf0000000
+ SHF_MERGE = 0x10
+ SHF_ORDERED = 0x4000000
+ SHF_OS_NONCONFORMING = 0x100
+ SHF_RELA_LIVEPATCH = 0x100000
+ SHF_RO_AFTER_INIT = 0x200000
+ SHF_STRINGS = 0x20
+ SHF_TLS = 0x400
+ SHF_WRITE = 0x1
+ SHN_ABS = 0xfff1
+ SHN_COMMON = 0xfff2
+ SHN_HIPROC = 0xff1f
+ SHN_HIRESERVE = 0xffff
+ SHN_LIVEPATCH = 0xff20
+ SHN_LOPROC = 0xff00
+ SHN_LORESERVE = 0xff00
+ SHN_UNDEF = 0x0
+ SHT_DYNAMIC = 0x6
+ SHT_DYNSYM = 0xb
+ SHT_HASH = 0x5
+ SHT_HIPROC = 0x7fffffff
+ SHT_HIUSER = 0xffffffff
+ SHT_LOPROC = 0x70000000
+ SHT_LOUSER = 0x80000000
+ SHT_NOBITS = 0x8
+ SHT_NOTE = 0x7
+ SHT_NULL = 0x0
+ SHT_NUM = 0xc
+ SHT_PROGBITS = 0x1
+ SHT_REL = 0x9
+ SHT_RELA = 0x4
+ SHT_SHLIB = 0xa
+ SHT_STRTAB = 0x3
+ SHT_SYMTAB = 0x2
SHUT_RD = 0x0
SHUT_RDWR = 0x2
SHUT_WR = 0x1
@@ -3317,6 +3664,16 @@ const (
STATX_UID = 0x8
STATX_WRITE_ATOMIC = 0x10000
STATX__RESERVED = 0x80000000
+ STB_GLOBAL = 0x1
+ STB_LOCAL = 0x0
+ STB_WEAK = 0x2
+ STT_COMMON = 0x5
+ STT_FILE = 0x4
+ STT_FUNC = 0x2
+ STT_NOTYPE = 0x0
+ STT_OBJECT = 0x1
+ STT_SECTION = 0x3
+ STT_TLS = 0x6
SYNC_FILE_RANGE_WAIT_AFTER = 0x4
SYNC_FILE_RANGE_WAIT_BEFORE = 0x1
SYNC_FILE_RANGE_WRITE = 0x2
@@ -3553,6 +3910,8 @@ const (
UTIME_OMIT = 0x3ffffffe
V9FS_MAGIC = 0x1021997
VERASE = 0x2
+ VER_FLG_BASE = 0x1
+ VER_FLG_WEAK = 0x2
VINTR = 0x0
VKILL = 0x3
VLNEXT = 0xf
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux.go b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
index 5cc1e8eb2..8935d10a3 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_linux.go
@@ -2238,3 +2238,13 @@ func Mseal(b []byte, flags uint) (err error) {
}
return
}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func setMemPolicy(mode int, mask *CPUSet, size int) (err error) {
+ _, _, e1 := Syscall(SYS_SET_MEMPOLICY, uintptr(mode), uintptr(unsafe.Pointer(mask)), uintptr(size))
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 944e75a11..c1a467017 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -3590,6 +3590,8 @@ type Nhmsg struct {
Flags uint32
}
+const SizeofNhmsg = 0x8
+
type NexthopGrp struct {
Id uint32
Weight uint8
@@ -3597,6 +3599,8 @@ type NexthopGrp struct {
Resvd2 uint16
}
+const SizeofNexthopGrp = 0x8
+
const (
NHA_UNSPEC = 0x0
NHA_ID = 0x1
@@ -6332,3 +6336,30 @@ type SockDiagReq struct {
}
const RTM_NEWNVLAN = 0x70
+
+const (
+ MPOL_BIND = 0x2
+ MPOL_DEFAULT = 0x0
+ MPOL_F_ADDR = 0x2
+ MPOL_F_MEMS_ALLOWED = 0x4
+ MPOL_F_MOF = 0x8
+ MPOL_F_MORON = 0x10
+ MPOL_F_NODE = 0x1
+ MPOL_F_NUMA_BALANCING = 0x2000
+ MPOL_F_RELATIVE_NODES = 0x4000
+ MPOL_F_SHARED = 0x1
+ MPOL_F_STATIC_NODES = 0x8000
+ MPOL_INTERLEAVE = 0x3
+ MPOL_LOCAL = 0x4
+ MPOL_MAX = 0x7
+ MPOL_MF_INTERNAL = 0x10
+ MPOL_MF_LAZY = 0x8
+ MPOL_MF_MOVE_ALL = 0x4
+ MPOL_MF_MOVE = 0x2
+ MPOL_MF_STRICT = 0x1
+ MPOL_MF_VALID = 0x7
+ MPOL_MODE_FLAGS = 0xe000
+ MPOL_PREFERRED = 0x1
+ MPOL_PREFERRED_MANY = 0x5
+ MPOL_WEIGHTED_INTERLEAVE = 0x6
+)
diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go
index bd5133730..69439df2a 100644
--- a/vendor/golang.org/x/sys/windows/syscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/syscall_windows.go
@@ -892,8 +892,12 @@ const socket_error = uintptr(^uint32(0))
//sys MultiByteToWideChar(codePage uint32, dwFlags uint32, str *byte, nstr int32, wchar *uint16, nwchar int32) (nwrite int32, err error) = kernel32.MultiByteToWideChar
//sys getBestInterfaceEx(sockaddr unsafe.Pointer, pdwBestIfIndex *uint32) (errcode error) = iphlpapi.GetBestInterfaceEx
//sys GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) = iphlpapi.GetIfEntry2Ex
+//sys GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) = iphlpapi.GetIpForwardEntry2
+//sys GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) = iphlpapi.GetIpForwardTable2
//sys GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) = iphlpapi.GetUnicastIpAddressEntry
+//sys FreeMibTable(memory unsafe.Pointer) = iphlpapi.FreeMibTable
//sys NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyIpInterfaceChange
+//sys NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyRouteChange2
//sys NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) = iphlpapi.NotifyUnicastIpAddressChange
//sys CancelMibChangeNotify2(notificationHandle Handle) (errcode error) = iphlpapi.CancelMibChangeNotify2
@@ -916,6 +920,17 @@ type RawSockaddrInet6 struct {
Scope_id uint32
}
+// RawSockaddrInet is a union that contains an IPv4, an IPv6 address, or an address family. See
+// https://learn.microsoft.com/en-us/windows/win32/api/ws2ipdef/ns-ws2ipdef-sockaddr_inet.
+//
+// A [*RawSockaddrInet] may be converted to a [*RawSockaddrInet4] or [*RawSockaddrInet6] using
+// unsafe, depending on the address family.
+type RawSockaddrInet struct {
+ Family uint16
+ Port uint16
+ Data [6]uint32
+}
+
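A hedged sketch of the unsafe conversion the comment describes, switching on the address family to view the union as the concrete IPv4 or IPv6 sockaddr (Windows-only; the package name and helper are illustrative):

package winnet

import (
	"net"
	"unsafe"

	"golang.org/x/sys/windows"
)

// sockaddrIP extracts the IP address carried by a RawSockaddrInet union.
func sockaddrIP(sa *windows.RawSockaddrInet) net.IP {
	switch sa.Family {
	case windows.AF_INET:
		v4 := (*windows.RawSockaddrInet4)(unsafe.Pointer(sa))
		return net.IP(v4.Addr[:])
	case windows.AF_INET6:
		v6 := (*windows.RawSockaddrInet6)(unsafe.Pointer(sa))
		return net.IP(v6.Addr[:])
	}
	return nil
}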
type RawSockaddr struct {
Family uint16
Data [14]int8
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 358be3c7f..6e4f50eb4 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -2320,6 +2320,82 @@ type MibIfRow2 struct {
OutQLen uint64
}
+// IP_ADDRESS_PREFIX stores an IP address prefix. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-ip_address_prefix.
+type IpAddressPrefix struct {
+ Prefix RawSockaddrInet
+ PrefixLength uint8
+}
+
+// NL_ROUTE_ORIGIN enumeration from nldef.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_origin.
+const (
+ NlroManual = 0
+ NlroWellKnown = 1
+ NlroDHCP = 2
+ NlroRouterAdvertisement = 3
+ Nlro6to4 = 4
+)
+
+// NL_ROUTE_PROTOCOL enumeration from nldef.h or
+// https://learn.microsoft.com/en-us/windows/win32/api/nldef/ne-nldef-nl_route_protocol.
+const (
+ MIB_IPPROTO_OTHER = 1
+ MIB_IPPROTO_LOCAL = 2
+ MIB_IPPROTO_NETMGMT = 3
+ MIB_IPPROTO_ICMP = 4
+ MIB_IPPROTO_EGP = 5
+ MIB_IPPROTO_GGP = 6
+ MIB_IPPROTO_HELLO = 7
+ MIB_IPPROTO_RIP = 8
+ MIB_IPPROTO_IS_IS = 9
+ MIB_IPPROTO_ES_IS = 10
+ MIB_IPPROTO_CISCO = 11
+ MIB_IPPROTO_BBN = 12
+ MIB_IPPROTO_OSPF = 13
+ MIB_IPPROTO_BGP = 14
+ MIB_IPPROTO_IDPR = 15
+ MIB_IPPROTO_EIGRP = 16
+ MIB_IPPROTO_DVMRP = 17
+ MIB_IPPROTO_RPL = 18
+ MIB_IPPROTO_DHCP = 19
+ MIB_IPPROTO_NT_AUTOSTATIC = 10002
+ MIB_IPPROTO_NT_STATIC = 10006
+ MIB_IPPROTO_NT_STATIC_NON_DOD = 10007
+)
+
+// MIB_IPFORWARD_ROW2 stores information about an IP route entry. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_row2.
+type MibIpForwardRow2 struct {
+ InterfaceLuid uint64
+ InterfaceIndex uint32
+ DestinationPrefix IpAddressPrefix
+ NextHop RawSockaddrInet
+ SitePrefixLength uint8
+ ValidLifetime uint32
+ PreferredLifetime uint32
+ Metric uint32
+ Protocol uint32
+ Loopback uint8
+ AutoconfigureAddress uint8
+ Publish uint8
+ Immortal uint8
+ Age uint32
+ Origin uint32
+}
+
+// MIB_IPFORWARD_TABLE2 contains a table of IP route entries. See
+// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_ipforward_table2.
+type MibIpForwardTable2 struct {
+ NumEntries uint32
+ Table [1]MibIpForwardRow2
+}
+
+// Rows returns the IP route entries in the table.
+func (t *MibIpForwardTable2) Rows() []MibIpForwardRow2 {
+ return unsafe.Slice(&t.Table[0], t.NumEntries)
+}
+
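Together with FreeMibTable, GetIpForwardTable2, and the Rows helper, the new types are enough to enumerate the routing table. A hedged, Windows-only sketch (dumpRoutes and the package name are illustrative):

package winroutes

import (
	"fmt"
	"unsafe"

	"golang.org/x/sys/windows"
)

// dumpRoutes prints one line per IPv4 route and releases the table via
// FreeMibTable, as the Windows API requires.
func dumpRoutes() error {
	var table *windows.MibIpForwardTable2
	if err := windows.GetIpForwardTable2(windows.AF_INET, &table); err != nil {
		return err
	}
	defer windows.FreeMibTable(unsafe.Pointer(table))
	for _, row := range table.Rows() {
		fmt.Printf("if %d metric %d proto %d\n", row.InterfaceIndex, row.Metric, row.Protocol)
	}
	return nil
}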
// MIB_UNICASTIPADDRESS_ROW stores information about a unicast IP address. See
// https://learn.microsoft.com/en-us/windows/win32/api/netioapi/ns-netioapi-mib_unicastipaddress_row.
type MibUnicastIpAddressRow struct {
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 426151a01..f25b7308a 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -182,13 +182,17 @@ var (
procDwmGetWindowAttribute = moddwmapi.NewProc("DwmGetWindowAttribute")
procDwmSetWindowAttribute = moddwmapi.NewProc("DwmSetWindowAttribute")
procCancelMibChangeNotify2 = modiphlpapi.NewProc("CancelMibChangeNotify2")
+ procFreeMibTable = modiphlpapi.NewProc("FreeMibTable")
procGetAdaptersAddresses = modiphlpapi.NewProc("GetAdaptersAddresses")
procGetAdaptersInfo = modiphlpapi.NewProc("GetAdaptersInfo")
procGetBestInterfaceEx = modiphlpapi.NewProc("GetBestInterfaceEx")
procGetIfEntry = modiphlpapi.NewProc("GetIfEntry")
procGetIfEntry2Ex = modiphlpapi.NewProc("GetIfEntry2Ex")
+ procGetIpForwardEntry2 = modiphlpapi.NewProc("GetIpForwardEntry2")
+ procGetIpForwardTable2 = modiphlpapi.NewProc("GetIpForwardTable2")
procGetUnicastIpAddressEntry = modiphlpapi.NewProc("GetUnicastIpAddressEntry")
procNotifyIpInterfaceChange = modiphlpapi.NewProc("NotifyIpInterfaceChange")
+ procNotifyRouteChange2 = modiphlpapi.NewProc("NotifyRouteChange2")
procNotifyUnicastIpAddressChange = modiphlpapi.NewProc("NotifyUnicastIpAddressChange")
procAddDllDirectory = modkernel32.NewProc("AddDllDirectory")
procAssignProcessToJobObject = modkernel32.NewProc("AssignProcessToJobObject")
@@ -1624,6 +1628,11 @@ func CancelMibChangeNotify2(notificationHandle Handle) (errcode error) {
return
}
+func FreeMibTable(memory unsafe.Pointer) {
+ syscall.SyscallN(procFreeMibTable.Addr(), uintptr(memory))
+ return
+}
+
func GetAdaptersAddresses(family uint32, flags uint32, reserved uintptr, adapterAddresses *IpAdapterAddresses, sizePointer *uint32) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetAdaptersAddresses.Addr(), uintptr(family), uintptr(flags), uintptr(reserved), uintptr(unsafe.Pointer(adapterAddresses)), uintptr(unsafe.Pointer(sizePointer)))
if r0 != 0 {
@@ -1664,6 +1673,22 @@ func GetIfEntry2Ex(level uint32, row *MibIfRow2) (errcode error) {
return
}
+func GetIpForwardEntry2(row *MibIpForwardRow2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIpForwardEntry2.Addr(), uintptr(unsafe.Pointer(row)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
+func GetIpForwardTable2(family uint16, table **MibIpForwardTable2) (errcode error) {
+ r0, _, _ := syscall.SyscallN(procGetIpForwardTable2.Addr(), uintptr(family), uintptr(unsafe.Pointer(table)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
func GetUnicastIpAddressEntry(row *MibUnicastIpAddressRow) (errcode error) {
r0, _, _ := syscall.SyscallN(procGetUnicastIpAddressEntry.Addr(), uintptr(unsafe.Pointer(row)))
if r0 != 0 {
@@ -1684,6 +1709,18 @@ func NotifyIpInterfaceChange(family uint16, callback uintptr, callerContext unsa
return
}
+func NotifyRouteChange2(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
+ var _p0 uint32
+ if initialNotification {
+ _p0 = 1
+ }
+ r0, _, _ := syscall.SyscallN(procNotifyRouteChange2.Addr(), uintptr(family), uintptr(callback), uintptr(callerContext), uintptr(_p0), uintptr(unsafe.Pointer(notificationHandle)))
+ if r0 != 0 {
+ errcode = syscall.Errno(r0)
+ }
+ return
+}
+
func NotifyUnicastIpAddressChange(family uint16, callback uintptr, callerContext unsafe.Pointer, initialNotification bool, notificationHandle *Handle) (errcode error) {
var _p0 uint32
if initialNotification {
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
index bddb2e2ae..9255449b9 100644
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
@@ -413,7 +413,7 @@ func (t *Terminal) eraseNPreviousChars(n int) {
}
}
-// countToLeftWord returns then number of characters from the cursor to the
+// countToLeftWord returns the number of characters from the cursor to the
// start of the previous word.
func (t *Terminal) countToLeftWord() int {
if t.pos == 0 {
@@ -438,7 +438,7 @@ func (t *Terminal) countToLeftWord() int {
return t.pos - pos
}
-// countToRightWord returns then number of characters from the cursor to the
+// countToRightWord returns the number of characters from the cursor to the
// start of the next word.
func (t *Terminal) countToRightWord() int {
pos := t.pos
@@ -478,7 +478,7 @@ func visualLength(runes []rune) int {
return length
}
-// histroryAt unlocks the terminal and relocks it while calling History.At.
+// historyAt unlocks the terminal and relocks it while calling History.At.
func (t *Terminal) historyAt(idx int) (string, bool) {
t.lock.Unlock() // Unlock to avoid deadlock if History methods use the output writer.
defer t.lock.Lock() // panic in At (or Len) protection.
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 89f89dd2d..680a70ca8 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -364,12 +364,6 @@ type jsonPackage struct {
DepsErrors []*packagesinternal.PackageError
}
-type jsonPackageError struct {
- ImportStack []string
- Pos string
- Err string
-}
-
func otherFiles(p *jsonPackage) [][]string {
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
}
diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go
index a6cf0e64a..ade5d1e79 100644
--- a/vendor/golang.org/x/tools/internal/event/core/event.go
+++ b/vendor/golang.org/x/tools/internal/event/core/event.go
@@ -28,11 +28,6 @@ type Event struct {
dynamic []label.Label // dynamically sized storage for remaining labels
}
-// eventLabelMap implements label.Map for a the labels of an Event.
-type eventLabelMap struct {
- event Event
-}
-
func (ev Event) At() time.Time { return ev.at }
func (ev Event) Format(f fmt.State, r rune) {
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index 780873e3a..4a4357d2b 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -569,7 +569,6 @@ func (p *iexporter) exportName(obj types.Object) (res string) {
type iexporter struct {
fset *token.FileSet
- out *bytes.Buffer
version int
shallow bool // don't put types from other packages in the index
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go
new file mode 100644
index 000000000..93acff217
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go
@@ -0,0 +1,49 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/token"
+ "go/types"
+)
+
+// NoEffects reports whether the expression has no side effects, i.e., it
+// does not modify the memory state. This function is conservative: it may
+// return false even when the expression has no effect.
+func NoEffects(info *types.Info, expr ast.Expr) bool {
+ noEffects := true
+ ast.Inspect(expr, func(n ast.Node) bool {
+ switch v := n.(type) {
+ case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr,
+ *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr,
+ *ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType,
+ *ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr:
+ // No effect
+ case *ast.UnaryExpr:
+ // Channel send <-ch has effects
+ if v.Op == token.ARROW {
+ noEffects = false
+ }
+ case *ast.CallExpr:
+ // Type conversion has no effects
+ if !info.Types[v.Fun].IsType() {
+ // TODO(adonovan): Add a case for built-in functions without side
+ // effects (by using callsPureBuiltin from tools/internal/refactor/inline)
+
+ noEffects = false
+ }
+ case *ast.FuncLit:
+ // A FuncLit has no effects, but do not descend into it.
+ return false
+ default:
+ // All other expressions have effects
+ noEffects = false
+ }
+
+ return noEffects
+ })
+ return noEffects
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
new file mode 100644
index 000000000..f2affec4f
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go
@@ -0,0 +1,71 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/types"
+ "slices"
+)
+
+// IsTypeNamed reports whether t is (or is an alias for) a
+// package-level defined type with the given package path and one of
+// the given names. It returns false if t is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool {
+ if named, ok := types.Unalias(t).(*types.Named); ok {
+ tname := named.Obj()
+ return tname != nil &&
+ IsPackageLevel(tname) &&
+ tname.Pkg().Path() == pkgPath &&
+ slices.Contains(names, tname.Name())
+ }
+ return false
+}
+
+// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a
+// package-level defined type with the given package path and one of the given
+// names. It returns false if t is not a pointer type.
+func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool {
+ r := Unpointer(t)
+ if r == t {
+ return false
+ }
+ return IsTypeNamed(r, pkgPath, names...)
+}
+
+// IsFunctionNamed reports whether obj is a package-level function
+// defined in the given package and has one of the given names.
+// It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.Name",
+// which is important for the performance of syntax matching.
+func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool {
+ f, ok := obj.(*types.Func)
+ return ok &&
+ IsPackageLevel(obj) &&
+ f.Pkg().Path() == pkgPath &&
+ f.Type().(*types.Signature).Recv() == nil &&
+ slices.Contains(names, f.Name())
+}
+
+// IsMethodNamed reports whether obj is a method defined on a
+// package-level type with the given package and type name, and has
+// one of the given names. It returns false if obj is nil.
+//
+// This function avoids allocating the concatenation of "pkg.TypeName.Name",
+// which is important for the performance of syntax matching.
+func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
+ if fn, ok := obj.(*types.Func); ok {
+ if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+ _, T := ReceiverNamed(recv)
+ return T != nil &&
+ IsTypeNamed(T, pkgPath, typeName) &&
+ slices.Contains(names, fn.Name())
+ }
+ }
+ return false
+}
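A hedged usage sketch (importable only from inside the x/tools module, since the package is internal; the package name is illustrative): matching context.Context values and (*time.Timer).Reset calls without building "pkg.Type.Method" strings.

package mychecks

import (
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

// isContext reports whether t is context.Context (or an alias for it).
func isContext(t types.Type) bool {
	return typesinternal.IsTypeNamed(t, "context", "Context")
}

// isTimerReset reports whether obj is the (*time.Timer).Reset method.
func isTimerReset(obj types.Object) bool {
	return typesinternal.IsMethodNamed(obj, "time", "Timer", "Reset")
}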
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
index b64f714eb..64f47919f 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -15,6 +15,14 @@ import (
// file.
// If the same package is imported multiple times, the last appearance is
// recorded.
+//
+// TODO(adonovan): this function ignores the effect of shadowing. It
+// should accept a [token.Pos] and a [types.Info] and compute only the
+// set of imports that are not shadowed at that point, analogous to
+// [analysisinternal.AddImport]. It could also compute (as a side
+// effect) the set of additional imports required to ensure that there
+// is an accessible import for each necessary package, making it
+// converge even more closely with AddImport.
func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
// Construct mapping of import paths to their defined names.
// It is only necessary to look at renaming imports.
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index a5cd7e8db..fef74a785 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -2,8 +2,20 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package typesinternal provides access to internal go/types APIs that are not
-// yet exported.
+// Package typesinternal provides helpful operators for dealing with
+// go/types:
+//
+// - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
+// - functions for converting types to strings or syntax (e.g. [TypeExpr], [FileQualifier]);
+// - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
+// - access to internal go/types APIs that are not yet
+// exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
+// - common algorithms related to types (e.g. [TooNewStdSymbols]).
+//
+// See also:
+// - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
+// - [golang.org/x/tools/internal/analysisinternal], for helpers for analyzers;
+// - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
package typesinternal
import (
@@ -13,6 +25,7 @@ import (
"reflect"
"unsafe"
+ "golang.org/x/tools/go/ast/inspector"
"golang.org/x/tools/internal/aliases"
)
@@ -60,6 +73,9 @@ func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, o
// which is often excessive.)
//
// If pkg is nil, it is equivalent to [*types.Package.Name].
+//
+// TODO(adonovan): all uses of this with TypeString should be
+// eliminated when https://go.dev/issues/75604 is resolved.
func NameRelativeTo(pkg *types.Package) types.Qualifier {
return func(other *types.Package) string {
if pkg != nil && pkg == other {
@@ -153,3 +169,31 @@ func NewTypesInfo() *types.Info {
FileVersions: map[*ast.File]string{},
}
}
+
+// EnclosingScope returns the innermost block logically enclosing the cursor.
+func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope {
+ for cur := range cur.Enclosing() {
+ n := cur.Node()
+ // A function's Scope is associated with its FuncType.
+ switch f := n.(type) {
+ case *ast.FuncDecl:
+ n = f.Type
+ case *ast.FuncLit:
+ n = f.Type
+ }
+ if b := info.Scopes[n]; b != nil {
+ return b
+ }
+ }
+ panic("no Scope for *ast.File")
+}
+
+// Imports reports whether path is imported by pkg.
+func Imports(pkg *types.Package, path string) bool {
+ for _, imp := range pkg.Imports() {
+ if imp.Path() == path {
+ return true
+ }
+ }
+ return false
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
index d272949c1..453bba2ad 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -204,23 +204,12 @@ func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
}
}
-// IsZeroExpr uses simple syntactic heuristics to report whether expr
-// is a obvious zero value, such as 0, "", nil, or false.
-// It cannot do better without type information.
-func IsZeroExpr(expr ast.Expr) bool {
- switch e := expr.(type) {
- case *ast.BasicLit:
- return e.Value == "0" || e.Value == `""`
- case *ast.Ident:
- return e.Name == "nil" || e.Name == "false"
- default:
- return false
- }
-}
-
// TypeExpr returns syntax for the specified type. References to named types
// are qualified by an appropriate (optional) qualifier function.
// It may panic for types such as Tuple or Union.
+//
+// See also https://go.dev/issues/75604, which will provide a robust
+// Type-to-valid-Go-syntax formatter.
func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
switch t := t.(type) {
case *types.Basic:
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index b15c10e46..b4bc3a2bf 100644
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -16,55 +16,124 @@
*
*/
-// Package pickfirst contains the pick_first load balancing policy.
+// Package pickfirst contains the pick_first load balancing policy, which
+// is the universal leaf policy.
package pickfirst
import (
"encoding/json"
"errors"
"fmt"
- rand "math/rand/v2"
+ "net"
+ "net/netip"
+ "sync"
+ "time"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/pickfirst/internal"
"google.golang.org/grpc/connectivity"
+ expstats "google.golang.org/grpc/experimental/stats"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/envconfig"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
-
- _ "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf" // For automatically registering the new pickfirst if required.
)
func init() {
- if envconfig.NewPickFirstEnabled {
- return
- }
balancer.Register(pickfirstBuilder{})
}
-var logger = grpclog.Component("pick-first-lb")
+// Name is the name of the pick_first balancer.
+const Name = "pick_first"
+
+// enableHealthListenerKeyType is a unique key type used in resolver
+// attributes to indicate whether the health listener usage is enabled.
+type enableHealthListenerKeyType struct{}
+
+var (
+ logger = grpclog.Component("pick-first-leaf-lb")
+ disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.disconnections",
+ Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
+ Unit: "{disconnection}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_succeeded",
+ Description: "EXPERIMENTAL. Number of successful connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+ connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
+ Name: "grpc.lb.pick_first.connection_attempts_failed",
+ Description: "EXPERIMENTAL. Number of failed connection attempts.",
+ Unit: "{attempt}",
+ Labels: []string{"grpc.target"},
+ Default: false,
+ })
+)
const (
- // Name is the name of the pick_first balancer.
- Name = "pick_first"
- logPrefix = "[pick-first-lb %p] "
+ // TODO: change to pick-first when this becomes the default pick_first policy.
+ logPrefix = "[pick-first-leaf-lb %p] "
+ // connectionDelayInterval is the time to wait for during the happy eyeballs
+ // pass before starting the next connection attempt.
+ connectionDelayInterval = 250 * time.Millisecond
+)
+
+type ipAddrFamily int
+
+const (
+ // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
+ // address.
+ ipAddrFamilyUnknown ipAddrFamily = iota
+ ipAddrFamilyV4
+ ipAddrFamilyV6
)
type pickfirstBuilder struct{}
-func (pickfirstBuilder) Build(cc balancer.ClientConn, _ balancer.BuildOptions) balancer.Balancer {
- b := &pickfirstBalancer{cc: cc}
+func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
+ b := &pickfirstBalancer{
+ cc: cc,
+ target: bo.Target.String(),
+ metricsRecorder: cc.MetricsRecorder(),
+
+ subConns: resolver.NewAddressMapV2[*scData](),
+ state: connectivity.Connecting,
+ cancelConnectionTimer: func() {},
+ }
b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
return b
}
-func (pickfirstBuilder) Name() string {
+func (b pickfirstBuilder) Name() string {
return Name
}
+func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
+ var cfg pfConfig
+ if err := json.Unmarshal(js, &cfg); err != nil {
+ return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+ }
+ return cfg, nil
+}
+
+// EnableHealthListener updates the state to configure pickfirst for using a
+// generic health listener.
+//
+// # Experimental
+//
+// Notice: This API is EXPERIMENTAL and may be changed or removed in a later
+// release.
+func EnableHealthListener(state resolver.State) resolver.State {
+ state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
+ return state
+}
+
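EnableHealthListener is opted into through a resolver attribute rather than a config field. A hedged sketch of how a parent policy or a test might enable it before forwarding state to pick_first (the surrounding variables are illustrative, not taken from this diff):

	state := resolver.State{Addresses: addrs}
	state = pickfirst.EnableHealthListener(state)
	// The attribute set above makes pick_first register a generic health
	// listener once a SubConn reports READY, instead of reporting READY
	// to the channel immediately.
	err := bal.UpdateClientConnState(balancer.ClientConnState{ResolverState: state})
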
type pfConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
@@ -74,90 +143,129 @@ type pfConfig struct {
ShuffleAddressList bool `json:"shuffleAddressList"`
}
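
ShuffleAddressList corresponds to the pick_first service config field from gRFC A62, i.e. {"pick_first": {"shuffleAddressList": true}} inside loadBalancingConfig. A hedged, in-package sketch of how the parsing behaves (illustrative, not a test from this diff):

	raw := json.RawMessage(`{"shuffleAddressList": true}`)
	lbCfg, err := pickfirstBuilder{}.ParseConfig(raw)
	if err != nil {
		// Malformed JSON is rejected with a descriptive error.
	}
	_ = lbCfg.(pfConfig).ShuffleAddressList // true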
-func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var cfg pfConfig
- if err := json.Unmarshal(js, &cfg); err != nil {
- return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
+// scData keeps track of the current state of the subConn.
+// It is not safe for concurrent access.
+type scData struct {
+ // The following fields are initialized at build time and read-only after
+ // that.
+ subConn balancer.SubConn
+ addr resolver.Address
+
+ rawConnectivityState connectivity.State
+ // The effective connectivity state based on raw connectivity, health state
+ // and after following sticky TransientFailure behaviour defined in A62.
+ effectiveState connectivity.State
+ lastErr error
+ connectionFailedInFirstPass bool
+}
+
+func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
+ sd := &scData{
+ rawConnectivityState: connectivity.Idle,
+ effectiveState: connectivity.Idle,
+ addr: addr,
}
- return cfg, nil
+ sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
+ StateListener: func(state balancer.SubConnState) {
+ b.updateSubConnState(sd, state)
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ sd.subConn = sc
+ return sd, nil
}
type pickfirstBalancer struct {
- logger *internalgrpclog.PrefixLogger
- state connectivity.State
- cc balancer.ClientConn
- subConn balancer.SubConn
+ // The following fields are initialized at build time and read-only after
+ // that and therefore do not need to be guarded by a mutex.
+ logger *internalgrpclog.PrefixLogger
+ cc balancer.ClientConn
+ target string
+ metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
+
+ // The mutex is used to ensure synchronization of updates triggered
+ // from the idle picker and the already serialized resolver,
+ // SubConn state updates.
+ mu sync.Mutex
+ // State reported to the channel based on SubConn states and resolver
+ // updates.
+ state connectivity.State
+ // scData for active subconns mapped by address.
+ subConns *resolver.AddressMapV2[*scData]
+ addressList addressList
+ firstPass bool
+ numTF int
+ cancelConnectionTimer func()
+ healthCheckingEnabled bool
}
+// ResolverError is called by the ClientConn when the name resolver produces
+// an error or when pickfirst determined the resolver update to be invalid.
func (b *pickfirstBalancer) ResolverError(err error) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.resolverErrorLocked(err)
+}
+
+func (b *pickfirstBalancer) resolverErrorLocked(err error) {
if b.logger.V(2) {
b.logger.Infof("Received error from the name resolver: %v", err)
}
- if b.subConn == nil {
- b.state = connectivity.TransientFailure
- }
- if b.state != connectivity.TransientFailure {
- // The picker will not change since the balancer does not currently
- // report an error.
+ // The picker will not change since the balancer does not currently
+ // report an error. If the balancer hasn't received a single good resolver
+ // update yet, transition to TRANSIENT_FAILURE.
+ if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
+ if b.logger.V(2) {
+ b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
+ }
return
}
- b.cc.UpdateState(balancer.State{
+
+ b.updateBalancerState(balancer.State{
ConnectivityState: connectivity.TransientFailure,
Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
})
}
-// Shuffler is an interface for shuffling an address list.
-type Shuffler interface {
- ShuffleAddressListForTesting(n int, swap func(i, j int))
-}
-
-// ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
-// is the number of elements. swap swaps the elements with indexes i and j.
-func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
-
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.cancelConnectionTimer()
if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
- // The resolver reported an empty address list. Treat it like an error by
- // calling b.ResolverError.
- if b.subConn != nil {
- // Shut down the old subConn. All addresses were removed, so it is
- // no longer valid.
- b.subConn.Shutdown()
- b.subConn = nil
- }
- b.ResolverError(errors.New("produced zero addresses"))
+ // Cleanup state pertaining to the previous resolver state.
+ // Treat an empty address list like an error by calling b.ResolverError.
+ b.closeSubConnsLocked()
+ b.addressList.updateAddrs(nil)
+ b.resolverErrorLocked(errors.New("produced zero addresses"))
return balancer.ErrBadResolverState
}
- // We don't have to guard this block with the env var because ParseConfig
- // already does so.
+ b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
cfg, ok := state.BalancerConfig.(pfConfig)
if state.BalancerConfig != nil && !ok {
- return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v", state.BalancerConfig, state.BalancerConfig)
+ return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
}
if b.logger.V(2) {
b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
}
- var addrs []resolver.Address
+ var newAddrs []resolver.Address
if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
- // Perform the optional shuffling described in gRFC A62. The shuffling will
- // change the order of endpoints but not touch the order of the addresses
- // within each endpoint. - A61
+ // Perform the optional shuffling described in gRFC A62. The shuffling
+ // will change the order of endpoints but not touch the order of the
+ // addresses within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
- // "Flatten the list by concatenating the ordered list of addresses for each
- // of the endpoints, in order." - A61
+ // "Flatten the list by concatenating the ordered list of addresses for
+ // each of the endpoints, in order." - A61
for _, endpoint := range endpoints {
- // "In the flattened list, interleave addresses from the two address
- // families, as per RFC-8304 section 4." - A61
- // TODO: support the above language.
- addrs = append(addrs, endpoint.Addresses...)
+ newAddrs = append(newAddrs, endpoint.Addresses...)
}
} else {
// Endpoints not set, process addresses until we migrate resolver
@@ -166,42 +274,53 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// target do not forward the corresponding correct endpoints down/split
// endpoints properly. Once all balancers correctly forward endpoints
// down, can delete this else conditional.
- addrs = state.ResolverState.Addresses
+ newAddrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
- addrs = append([]resolver.Address{}, addrs...)
- internal.RandShuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ newAddrs = append([]resolver.Address{}, newAddrs...)
+ internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
}
}
- if b.subConn != nil {
- b.cc.UpdateAddresses(b.subConn, addrs)
+ // If an address appears in multiple endpoints or in the same endpoint
+ // multiple times, we keep it only once. We will create only one SubConn
+ // for the address because an AddressMap is used to store SubConns.
+ // Not de-duplicating would result in attempting to connect to the same
+ // SubConn multiple times in the same pass. We don't want this.
+ newAddrs = deDupAddresses(newAddrs)
+ newAddrs = interleaveAddresses(newAddrs)
+
+ prevAddr := b.addressList.currentAddress()
+ prevSCData, found := b.subConns.Get(prevAddr)
+ prevAddrsCount := b.addressList.size()
+ isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
+ b.addressList.updateAddrs(newAddrs)
+
+ // If the previous ready SubConn exists in new address list,
+ // keep this connection and don't create new SubConns.
+ if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
return nil
}
- var subConn balancer.SubConn
- subConn, err := b.cc.NewSubConn(addrs, balancer.NewSubConnOptions{
- StateListener: func(state balancer.SubConnState) {
- b.updateSubConnState(subConn, state)
- },
- })
- if err != nil {
- if b.logger.V(2) {
- b.logger.Infof("Failed to create new SubConn: %v", err)
- }
- b.state = connectivity.TransientFailure
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("error creating connection: %v", err)},
+ b.reconcileSubConnsLocked(newAddrs)
+ // If it's the first resolver update or the balancer was already READY
+ // (but the new address list does not contain the ready SubConn) or
+ // CONNECTING, enter CONNECTING.
+ // We may be in TRANSIENT_FAILURE due to a previous empty address list,
+ // we should still enter CONNECTING because the sticky TF behaviour
+ // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
+ // due to connectivity failures.
+ if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
+ // Start connection attempt at first address.
+ b.forceUpdateConcludedStateLocked(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
- return balancer.ErrBadResolverState
+ b.startFirstPassLocked()
+ } else if b.state == connectivity.TransientFailure {
+ // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
+ // we're READY. See A62.
+ b.startFirstPassLocked()
}
- b.subConn = subConn
- b.state = connectivity.Idle
- b.cc.UpdateState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.subConn.Connect()
return nil
}
@@ -211,63 +330,484 @@ func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state b
b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
}
-func (b *pickfirstBalancer) updateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- if b.logger.V(2) {
- b.logger.Infof("Received SubConn state update: %p, %+v", subConn, state)
+func (b *pickfirstBalancer) Close() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ b.closeSubConnsLocked()
+ b.cancelConnectionTimer()
+ b.state = connectivity.Shutdown
+}
+
+// ExitIdle moves the balancer out of idle state. It can be called concurrently
+// by the idlePicker and clientConn so access to variables should be
+// synchronized.
+func (b *pickfirstBalancer) ExitIdle() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ if b.state == connectivity.Idle {
+ // Move the balancer into CONNECTING state immediately. This is done to
+ // avoid staying in IDLE if a resolver update arrives before the first
+ // SubConn reports CONNECTING.
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ b.startFirstPassLocked()
+ }
+}
+
+func (b *pickfirstBalancer) startFirstPassLocked() {
+ b.firstPass = true
+ b.numTF = 0
+ // Reset the connection attempt record for existing SubConns.
+ for _, sd := range b.subConns.Values() {
+ sd.connectionFailedInFirstPass = false
+ }
+ b.requestConnectionLocked()
+}
+
+func (b *pickfirstBalancer) closeSubConnsLocked() {
+ for _, sd := range b.subConns.Values() {
+ sd.subConn.Shutdown()
+ }
+ b.subConns = resolver.NewAddressMapV2[*scData]()
+}
+
+// deDupAddresses ensures that each address appears only once in the slice.
+func deDupAddresses(addrs []resolver.Address) []resolver.Address {
+ seenAddrs := resolver.NewAddressMapV2[bool]()
+ retAddrs := []resolver.Address{}
+
+ for _, addr := range addrs {
+ if _, ok := seenAddrs.Get(addr); ok {
+ continue
+ }
+ seenAddrs.Set(addr, true)
+ retAddrs = append(retAddrs, addr)
+ }
+ return retAddrs
+}
+
+// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
+// as per RFC-8305 section 4.
+// Whichever address family is first in the list is followed by an address of
+// the other address family; that is, if the first address in the list is IPv6,
+// then the first IPv4 address should be moved up in the list to be second in
+// the list. It doesn't support configuring "First Address Family Count", i.e.
+// there will always be a single member of the first address family at the
+// beginning of the interleaved list.
+// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
+// "unknown" family for interleaving.
+// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
+func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
+ familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
+ interleavingOrder := []ipAddrFamily{}
+ for _, addr := range addrs {
+ family := addressFamily(addr.Addr)
+ if _, found := familyAddrsMap[family]; !found {
+ interleavingOrder = append(interleavingOrder, family)
+ }
+ familyAddrsMap[family] = append(familyAddrsMap[family], addr)
+ }
+
+ interleavedAddrs := make([]resolver.Address, 0, len(addrs))
+
+ for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
+ // Some IP types may have fewer addresses than others, so we look for
+ // the next type that has a remaining member to add to the interleaved
+ // list.
+ family := interleavingOrder[curFamilyIdx]
+ remainingMembers := familyAddrsMap[family]
+ if len(remainingMembers) > 0 {
+ interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
+ familyAddrsMap[family] = remainingMembers[1:]
+ }
+ }
+
+ return interleavedAddrs
+}
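
The interleaving order depends only on the family of the first address seen. A small worked example under the behaviour documented above (addresses abbreviated, illustrative only):

	input:  [v6-a, v6-b, v4-a, v4-b, v6-c]
	output: [v6-a, v4-a, v6-b, v4-b, v6-c]

IPv6 leads because it appears first; the families then alternate until the shorter list (IPv4) is exhausted, after which the remaining IPv6 addresses are appended in their original order.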
+
+// addressFamily returns the ipAddrFamily after parsing the address string.
+// If the address isn't of the format "ip-address:port", it returns
+// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
+// using a resolver like passthrough where the address may be a hostname in
+// some format that the dialer can resolve.
+func addressFamily(address string) ipAddrFamily {
+ // Parse the IP after removing the port.
+ host, _, err := net.SplitHostPort(address)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ ip, err := netip.ParseAddr(host)
+ if err != nil {
+ return ipAddrFamilyUnknown
+ }
+ switch {
+ case ip.Is4() || ip.Is4In6():
+ return ipAddrFamilyV4
+ case ip.Is6():
+ return ipAddrFamilyV6
+ default:
+ return ipAddrFamilyUnknown
+ }
+}
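
Classification here is purely syntactic, so a few hedged examples of the expected results (illustrative only):

	addressFamily("10.0.0.1:443")      // ipAddrFamilyV4
	addressFamily("[2001:db8::1]:443") // ipAddrFamilyV6
	addressFamily("example.com:443")   // ipAddrFamilyUnknown (hostname, left to the dialer)
	addressFamily("10.0.0.1")          // ipAddrFamilyUnknown (no port, SplitHostPort fails)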
+
+// reconcileSubConnsLocked updates the active subchannels based on a new address
+// list from the resolver. It does this by:
+// - closing subchannels: any existing subchannels associated with addresses
+// that are no longer in the updated list are shut down.
+// - removing subchannels: entries for these closed subchannels are removed
+// from the subchannel map.
+//
+// This ensures that the subchannel map accurately reflects the current set of
+// addresses received from the name resolver.
+func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
+ newAddrsMap := resolver.NewAddressMapV2[bool]()
+ for _, addr := range newAddrs {
+ newAddrsMap.Set(addr, true)
+ }
+
+ for _, oldAddr := range b.subConns.Keys() {
+ if _, ok := newAddrsMap.Get(oldAddr); ok {
+ continue
+ }
+ val, _ := b.subConns.Get(oldAddr)
+ val.subConn.Shutdown()
+ b.subConns.Delete(oldAddr)
+ }
+}
+
+// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
+// becomes ready, which means that all other subConns must be shut down.
+func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
+ b.cancelConnectionTimer()
+ for _, sd := range b.subConns.Values() {
+ if sd.subConn != selected.subConn {
+ sd.subConn.Shutdown()
+ }
+ }
+ b.subConns = resolver.NewAddressMapV2[*scData]()
+ b.subConns.Set(selected.addr, selected)
+}
+
+// requestConnectionLocked starts connecting on the subchannel corresponding to
+// the current address. If no subchannel exists, one is created. If the current
+// subchannel is in TransientFailure, a connection to the next address is
+// attempted until a subchannel is found.
+func (b *pickfirstBalancer) requestConnectionLocked() {
+ if !b.addressList.isValid() {
+ return
+ }
+ var lastErr error
+ for valid := true; valid; valid = b.addressList.increment() {
+ curAddr := b.addressList.currentAddress()
+ sd, ok := b.subConns.Get(curAddr)
+ if !ok {
+ var err error
+ // We want to assign the new scData to sd from the outer scope,
+ // hence we can't use := below.
+ sd, err = b.newSCData(curAddr)
+ if err != nil {
+ // This should never happen, unless the clientConn is being shut
+ // down.
+ if b.logger.V(2) {
+ b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
+ }
+ // Do nothing, the LB policy will be closed soon.
+ return
+ }
+ b.subConns.Set(curAddr, sd)
+ }
+
+ switch sd.rawConnectivityState {
+ case connectivity.Idle:
+ sd.subConn.Connect()
+ b.scheduleNextConnectionLocked()
+ return
+ case connectivity.TransientFailure:
+ // The SubConn is being re-used and failed during a previous pass
+ // over the addressList. It has not completed backoff yet.
+ // Mark it as having failed and try the next address.
+ sd.connectionFailedInFirstPass = true
+ lastErr = sd.lastErr
+ continue
+ case connectivity.Connecting:
+ // Wait for the connection attempt to complete or the timer to fire
+ // before attempting the next address.
+ b.scheduleNextConnectionLocked()
+ return
+ default:
+ b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
+ return
+
+ }
+ }
+
+ // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
+ // first pass if possible.
+ b.endFirstPassIfPossibleLocked(lastErr)
+}
+
+func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
+ b.cancelConnectionTimer()
+ if !b.addressList.hasNext() {
+ return
}
- if b.subConn != subConn {
+ curAddr := b.addressList.currentAddress()
+ cancelled := false // Access to this is protected by the balancer's mutex.
+ closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // If the scheduled task is cancelled while acquiring the mutex, return.
+ if cancelled {
+ return
+ }
if b.logger.V(2) {
- b.logger.Infof("Ignored state change because subConn is not recognized")
+ b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
+ }
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
}
+ })
+ // Access to the cancellation callback held by the balancer is guarded by
+ // the balancer's mutex, so it's safe to set the boolean from the callback.
+ b.cancelConnectionTimer = sync.OnceFunc(func() {
+ cancelled = true
+ closeFn()
+ })
+}
+
+func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ oldState := sd.rawConnectivityState
+ sd.rawConnectivityState = newState.ConnectivityState
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes this
+ // SubConn.
+ if !b.isActiveSCData(sd) {
return
}
- if state.ConnectivityState == connectivity.Shutdown {
- b.subConn = nil
+ if newState.ConnectivityState == connectivity.Shutdown {
+ sd.effectiveState = connectivity.Shutdown
return
}
- switch state.ConnectivityState {
- case connectivity.Ready:
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{result: balancer.PickResult{SubConn: subConn}},
- })
- case connectivity.Connecting:
- if b.state == connectivity.TransientFailure {
- // We stay in TransientFailure until we are Ready. See A62.
+ // Record a connection attempt when exiting CONNECTING.
+ if newState.ConnectivityState == connectivity.TransientFailure {
+ sd.connectionFailedInFirstPass = true
+ connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+
+ if newState.ConnectivityState == connectivity.Ready {
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ b.shutdownRemainingLocked(sd)
+ if !b.addressList.seekTo(sd.addr) {
+ // This should not fail as we should have only one SubConn after
+ // entering READY. The SubConn should be present in the addressList.
+ b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
return
}
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
+ if !b.healthCheckingEnabled {
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
+ }
+
+ sd.effectiveState = connectivity.Ready
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
+ })
+ return
+ }
+ if b.logger.V(2) {
+ b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
+ }
+ // Send a CONNECTING update to take the SubConn out of sticky-TF if
+ // required.
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
+ sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
+ b.updateSubConnHealthState(sd, scs)
+ })
+ return
+ }
+
+ // If the LB policy is READY, and it receives a subchannel state change,
+ // it means that the READY subchannel has failed.
+ // A SubConn can also transition from CONNECTING directly to IDLE when
+ // a transport is successfully created, but the connection fails
+ // before the SubConn can send the notification for READY. We treat
+ // this as a successful connection and transition to IDLE.
+ // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
+ // part of the if condition below once the issue is fixed.
+ if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
+ // Once a transport fails, the balancer enters IDLE and starts from
+ // the first address when the picker is used.
+ b.shutdownRemainingLocked(sd)
+ sd.effectiveState = newState.ConnectivityState
+ // READY SubConn interspliced in between CONNECTING and IDLE, need to
+ // account for that.
+ if oldState == connectivity.Connecting {
+ // A known issue (https://github.com/grpc/grpc-go/issues/7862)
+ // causes a race that prevents the READY state change notification.
+ // This works around it.
+ connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
+ }
+ disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
+ b.addressList.reset()
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Idle,
+ Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
+ })
+ return
+ }
+
+ if b.firstPass {
+ switch newState.ConnectivityState {
+ case connectivity.Connecting:
+ // The effective state can be in either IDLE, CONNECTING or
+ // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
+ // TRANSIENT_FAILURE until it's READY. See A62.
+ if sd.effectiveState != connectivity.TransientFailure {
+ sd.effectiveState = connectivity.Connecting
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
+ })
+ }
+ case connectivity.TransientFailure:
+ sd.lastErr = newState.ConnectionError
+ sd.effectiveState = connectivity.TransientFailure
+ // Since we're re-using common SubConns while handling resolver
+ // updates, we could receive an out of turn TRANSIENT_FAILURE from
+ // a pass over the previous address list. Happy Eyeballs will also
+ // cause out of order updates to arrive.
+
+ if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
+ b.cancelConnectionTimer()
+ if b.addressList.increment() {
+ b.requestConnectionLocked()
+ return
+ }
+ }
+
+ // End the first pass if we've seen a TRANSIENT_FAILURE from all
+ // SubConns once.
+ b.endFirstPassIfPossibleLocked(newState.ConnectionError)
+ }
+ return
+ }
+
+ // We have finished the first pass, keep re-connecting failing SubConns.
+ switch newState.ConnectivityState {
+ case connectivity.TransientFailure:
+ b.numTF = (b.numTF + 1) % b.subConns.Len()
+ sd.lastErr = newState.ConnectionError
+ if b.numTF%b.subConns.Len() == 0 {
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: newState.ConnectionError},
+ })
+ }
+ // We don't need to request re-resolution since the SubConn already
+ // does that before reporting TRANSIENT_FAILURE.
+ // TODO: #7534 - Move re-resolution requests from SubConn into
+ // pick_first.
case connectivity.Idle:
- if b.state == connectivity.TransientFailure {
- // We stay in TransientFailure until we are Ready. Also kick the
- // subConn out of Idle into Connecting. See A62.
- b.subConn.Connect()
+ sd.subConn.Connect()
+ }
+}
+
+// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
+// addresses are tried and their SubConns have reported a failure.
+func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
+ // An optimization to avoid iterating over the entire SubConn map.
+ if b.addressList.isValid() {
+ return
+ }
+ // Connect() has been called on all the SubConns. The first pass can be
+ // ended if all the SubConns have reported a failure.
+ for _, sd := range b.subConns.Values() {
+ if !sd.connectionFailedInFirstPass {
return
}
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &idlePicker{subConn: subConn},
+ }
+ b.firstPass = false
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: lastErr},
+ })
+ // Start re-connecting all the SubConns that are already in IDLE.
+ for _, sd := range b.subConns.Values() {
+ if sd.rawConnectivityState == connectivity.Idle {
+ sd.subConn.Connect()
+ }
+ }
+}
+
+func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
+ activeSD, found := b.subConns.Get(sd.addr)
+ return found && activeSD == sd
+}
+
+func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
+ b.mu.Lock()
+ defer b.mu.Unlock()
+ // Previously relevant SubConns can still callback with state updates.
+ // To prevent pickers from returning these obsolete SubConns, this logic
+ // is included to check if the current list of active SubConns includes
+ // this SubConn.
+ if !b.isActiveSCData(sd) {
+ return
+ }
+ sd.effectiveState = state.ConnectivityState
+ switch state.ConnectivityState {
+ case connectivity.Ready:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Ready,
+ Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
})
case connectivity.TransientFailure:
- b.cc.UpdateState(balancer.State{
- ConnectivityState: state.ConnectivityState,
- Picker: &picker{err: state.ConnectionError},
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.TransientFailure,
+ Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
+ })
+ case connectivity.Connecting:
+ b.updateBalancerState(balancer.State{
+ ConnectivityState: connectivity.Connecting,
+ Picker: &picker{err: balancer.ErrNoSubConnAvailable},
})
+ default:
+ b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
}
- b.state = state.ConnectivityState
}
-func (b *pickfirstBalancer) Close() {
+// updateBalancerState stores the state reported to the channel and calls
+// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
+// updates to the channel.
+func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
+ // In case of TransientFailures allow the picker to be updated to update
+ // the connectivity error, in all other cases don't send duplicate state
+ // updates.
+ if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
+ return
+ }
+ b.forceUpdateConcludedStateLocked(newState)
}
-func (b *pickfirstBalancer) ExitIdle() {
- if b.subConn != nil && b.state == connectivity.Idle {
- b.subConn.Connect()
- }
+// forceUpdateConcludedStateLocked stores the state reported to the channel and
+// calls ClientConn.UpdateState().
+// A separate function is defined to force update the ClientConn state since the
+// channel doesn't correctly assume that LB policies start in CONNECTING and
+// relies on LB policy to send an initial CONNECTING update.
+func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
+ b.state = newState.ConnectivityState
+ b.cc.UpdateState(newState)
}
type picker struct {
@@ -282,10 +822,87 @@ func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
// CONNECTING when Pick is called.
type idlePicker struct {
- subConn balancer.SubConn
+ exitIdle func()
}
func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- i.subConn.Connect()
+ i.exitIdle()
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
}
+
+// addressList manages sequentially iterating over addresses present in a list
+// of endpoints. It provides a 1 dimensional view of the addresses present in
+// the endpoints.
+// This type is not safe for concurrent access.
+type addressList struct {
+ addresses []resolver.Address
+ idx int
+}
+
+func (al *addressList) isValid() bool {
+ return al.idx < len(al.addresses)
+}
+
+func (al *addressList) size() int {
+ return len(al.addresses)
+}
+
+// increment moves to the next index in the address list.
+// This method returns false if it went off the list, true otherwise.
+func (al *addressList) increment() bool {
+ if !al.isValid() {
+ return false
+ }
+ al.idx++
+ return al.idx < len(al.addresses)
+}
+
+// currentAddress returns the current address pointed to in the addressList.
+// If the list is in an invalid state, it returns an empty address instead.
+func (al *addressList) currentAddress() resolver.Address {
+ if !al.isValid() {
+ return resolver.Address{}
+ }
+ return al.addresses[al.idx]
+}
+
+func (al *addressList) reset() {
+ al.idx = 0
+}
+
+func (al *addressList) updateAddrs(addrs []resolver.Address) {
+ al.addresses = addrs
+ al.reset()
+}
+
+// seekTo returns false if the needle was not found and the current index was
+// left unchanged.
+func (al *addressList) seekTo(needle resolver.Address) bool {
+ for ai, addr := range al.addresses {
+ if !equalAddressIgnoringBalAttributes(&addr, &needle) {
+ continue
+ }
+ al.idx = ai
+ return true
+ }
+ return false
+}
+
+// hasNext returns whether incrementing the addressList will result in moving
+// past the end of the list. If the list has already moved past the end, it
+// returns false.
+func (al *addressList) hasNext() bool {
+ if !al.isValid() {
+ return false
+ }
+ return al.idx+1 < len(al.addresses)
+}
+
+// equalAddressIgnoringBalAttributes returns true if a and b are considered
+// equal. This is different from the Equal method on the resolver.Address type
+// which considers all fields to determine equality. Here, we only consider
+// fields that are meaningful to the SubConn.
+func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
+ return a.Addr == b.Addr && a.ServerName == b.ServerName &&
+ a.Attributes.Equal(b.Attributes)
+}
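
addressList intentionally allows the index to move one past the end: isValid() then reports false and increment() returns false, which is how requestConnectionLocked detects that every address has been tried. A hedged in-package sketch of the iteration contract (illustrative only):

	var al addressList
	al.updateAddrs([]resolver.Address{{Addr: "a:1"}, {Addr: "b:1"}})
	for ok := al.isValid(); ok; ok = al.increment() {
		_ = al.currentAddress() // visits a:1, then b:1
	}
	// After the loop, al.isValid() is false until reset() or updateAddrs().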
diff --git a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
deleted file mode 100644
index 9ffdd28a0..000000000
--- a/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirstleaf/pickfirstleaf.go
+++ /dev/null
@@ -1,913 +0,0 @@
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package pickfirstleaf contains the pick_first load balancing policy which
-// will be the universal leaf policy after dualstack changes are implemented.
-//
-// # Experimental
-//
-// Notice: This package is EXPERIMENTAL and may be changed or removed in a
-// later release.
-package pickfirstleaf
-
-import (
- "encoding/json"
- "errors"
- "fmt"
- "net"
- "net/netip"
- "sync"
- "time"
-
- "google.golang.org/grpc/balancer"
- "google.golang.org/grpc/balancer/pickfirst/internal"
- "google.golang.org/grpc/connectivity"
- expstats "google.golang.org/grpc/experimental/stats"
- "google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/envconfig"
- internalgrpclog "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/pretty"
- "google.golang.org/grpc/resolver"
- "google.golang.org/grpc/serviceconfig"
-)
-
-func init() {
- if envconfig.NewPickFirstEnabled {
- // Register as the default pick_first balancer.
- Name = "pick_first"
- }
- balancer.Register(pickfirstBuilder{})
-}
-
-// enableHealthListenerKeyType is a unique key type used in resolver
-// attributes to indicate whether the health listener usage is enabled.
-type enableHealthListenerKeyType struct{}
-
-var (
- logger = grpclog.Component("pick-first-leaf-lb")
- // Name is the name of the pick_first_leaf balancer.
- // It is changed to "pick_first" in init() if this balancer is to be
- // registered as the default pickfirst.
- Name = "pick_first_leaf"
- disconnectionsMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.disconnections",
- Description: "EXPERIMENTAL. Number of times the selected subchannel becomes disconnected.",
- Unit: "{disconnection}",
- Labels: []string{"grpc.target"},
- Default: false,
- })
- connectionAttemptsSucceededMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.connection_attempts_succeeded",
- Description: "EXPERIMENTAL. Number of successful connection attempts.",
- Unit: "{attempt}",
- Labels: []string{"grpc.target"},
- Default: false,
- })
- connectionAttemptsFailedMetric = expstats.RegisterInt64Count(expstats.MetricDescriptor{
- Name: "grpc.lb.pick_first.connection_attempts_failed",
- Description: "EXPERIMENTAL. Number of failed connection attempts.",
- Unit: "{attempt}",
- Labels: []string{"grpc.target"},
- Default: false,
- })
-)
-
-const (
- // TODO: change to pick-first when this becomes the default pick_first policy.
- logPrefix = "[pick-first-leaf-lb %p] "
- // connectionDelayInterval is the time to wait for during the happy eyeballs
- // pass before starting the next connection attempt.
- connectionDelayInterval = 250 * time.Millisecond
-)
-
-type ipAddrFamily int
-
-const (
- // ipAddrFamilyUnknown represents strings that can't be parsed as an IP
- // address.
- ipAddrFamilyUnknown ipAddrFamily = iota
- ipAddrFamilyV4
- ipAddrFamilyV6
-)
-
-type pickfirstBuilder struct{}
-
-func (pickfirstBuilder) Build(cc balancer.ClientConn, bo balancer.BuildOptions) balancer.Balancer {
- b := &pickfirstBalancer{
- cc: cc,
- target: bo.Target.String(),
- metricsRecorder: cc.MetricsRecorder(),
-
- subConns: resolver.NewAddressMapV2[*scData](),
- state: connectivity.Connecting,
- cancelConnectionTimer: func() {},
- }
- b.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf(logPrefix, b))
- return b
-}
-
-func (b pickfirstBuilder) Name() string {
- return Name
-}
-
-func (pickfirstBuilder) ParseConfig(js json.RawMessage) (serviceconfig.LoadBalancingConfig, error) {
- var cfg pfConfig
- if err := json.Unmarshal(js, &cfg); err != nil {
- return nil, fmt.Errorf("pickfirst: unable to unmarshal LB policy config: %s, error: %v", string(js), err)
- }
- return cfg, nil
-}
-
-// EnableHealthListener updates the state to configure pickfirst for using a
-// generic health listener.
-func EnableHealthListener(state resolver.State) resolver.State {
- state.Attributes = state.Attributes.WithValue(enableHealthListenerKeyType{}, true)
- return state
-}
-
-type pfConfig struct {
- serviceconfig.LoadBalancingConfig `json:"-"`
-
- // If set to true, instructs the LB policy to shuffle the order of the list
- // of endpoints received from the name resolver before attempting to
- // connect to them.
- ShuffleAddressList bool `json:"shuffleAddressList"`
-}
-
-// scData keeps track of the current state of the subConn.
-// It is not safe for concurrent access.
-type scData struct {
- // The following fields are initialized at build time and read-only after
- // that.
- subConn balancer.SubConn
- addr resolver.Address
-
- rawConnectivityState connectivity.State
- // The effective connectivity state based on raw connectivity, health state
- // and after following sticky TransientFailure behaviour defined in A62.
- effectiveState connectivity.State
- lastErr error
- connectionFailedInFirstPass bool
-}
-
-func (b *pickfirstBalancer) newSCData(addr resolver.Address) (*scData, error) {
- sd := &scData{
- rawConnectivityState: connectivity.Idle,
- effectiveState: connectivity.Idle,
- addr: addr,
- }
- sc, err := b.cc.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{
- StateListener: func(state balancer.SubConnState) {
- b.updateSubConnState(sd, state)
- },
- })
- if err != nil {
- return nil, err
- }
- sd.subConn = sc
- return sd, nil
-}
-
-type pickfirstBalancer struct {
- // The following fields are initialized at build time and read-only after
- // that and therefore do not need to be guarded by a mutex.
- logger *internalgrpclog.PrefixLogger
- cc balancer.ClientConn
- target string
- metricsRecorder expstats.MetricsRecorder // guaranteed to be non nil
-
- // The mutex is used to ensure synchronization of updates triggered
- // from the idle picker and the already serialized resolver,
- // SubConn state updates.
- mu sync.Mutex
- // State reported to the channel based on SubConn states and resolver
- // updates.
- state connectivity.State
- // scData for active subonns mapped by address.
- subConns *resolver.AddressMapV2[*scData]
- addressList addressList
- firstPass bool
- numTF int
- cancelConnectionTimer func()
- healthCheckingEnabled bool
-}
-
-// ResolverError is called by the ClientConn when the name resolver produces
-// an error or when pickfirst determined the resolver update to be invalid.
-func (b *pickfirstBalancer) ResolverError(err error) {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.resolverErrorLocked(err)
-}
-
-func (b *pickfirstBalancer) resolverErrorLocked(err error) {
- if b.logger.V(2) {
- b.logger.Infof("Received error from the name resolver: %v", err)
- }
-
- // The picker will not change since the balancer does not currently
- // report an error. If the balancer hasn't received a single good resolver
- // update yet, transition to TRANSIENT_FAILURE.
- if b.state != connectivity.TransientFailure && b.addressList.size() > 0 {
- if b.logger.V(2) {
- b.logger.Infof("Ignoring resolver error because balancer is using a previous good update.")
- }
- return
- }
-
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("name resolver error: %v", err)},
- })
-}
-
-func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.cancelConnectionTimer()
- if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
- // Cleanup state pertaining to the previous resolver state.
- // Treat an empty address list like an error by calling b.ResolverError.
- b.closeSubConnsLocked()
- b.addressList.updateAddrs(nil)
- b.resolverErrorLocked(errors.New("produced zero addresses"))
- return balancer.ErrBadResolverState
- }
- b.healthCheckingEnabled = state.ResolverState.Attributes.Value(enableHealthListenerKeyType{}) != nil
- cfg, ok := state.BalancerConfig.(pfConfig)
- if state.BalancerConfig != nil && !ok {
- return fmt.Errorf("pickfirst: received illegal BalancerConfig (type %T): %v: %w", state.BalancerConfig, state.BalancerConfig, balancer.ErrBadResolverState)
- }
-
- if b.logger.V(2) {
- b.logger.Infof("Received new config %s, resolver state %s", pretty.ToJSON(cfg), pretty.ToJSON(state.ResolverState))
- }
-
- var newAddrs []resolver.Address
- if endpoints := state.ResolverState.Endpoints; len(endpoints) != 0 {
- // Perform the optional shuffling described in gRFC A62. The shuffling
- // will change the order of endpoints but not touch the order of the
- // addresses within each endpoint. - A61
- if cfg.ShuffleAddressList {
- endpoints = append([]resolver.Endpoint{}, endpoints...)
- internal.RandShuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
- }
-
- // "Flatten the list by concatenating the ordered list of addresses for
- // each of the endpoints, in order." - A61
- for _, endpoint := range endpoints {
- newAddrs = append(newAddrs, endpoint.Addresses...)
- }
- } else {
- // Endpoints not set, process addresses until we migrate resolver
- // emissions fully to Endpoints. The top channel does wrap emitted
- // addresses with endpoints, however some balancers such as weighted
- // target do not forward the corresponding correct endpoints down/split
- // endpoints properly. Once all balancers correctly forward endpoints
- // down, can delete this else conditional.
- newAddrs = state.ResolverState.Addresses
- if cfg.ShuffleAddressList {
- newAddrs = append([]resolver.Address{}, newAddrs...)
- internal.RandShuffle(len(newAddrs), func(i, j int) { newAddrs[i], newAddrs[j] = newAddrs[j], newAddrs[i] })
- }
- }
-
- // If an address appears in multiple endpoints or in the same endpoint
- // multiple times, we keep it only once. We will create only one SubConn
- // for the address because an AddressMap is used to store SubConns.
- // Not de-duplicating would result in attempting to connect to the same
- // SubConn multiple times in the same pass. We don't want this.
- newAddrs = deDupAddresses(newAddrs)
- newAddrs = interleaveAddresses(newAddrs)
-
- prevAddr := b.addressList.currentAddress()
- prevSCData, found := b.subConns.Get(prevAddr)
- prevAddrsCount := b.addressList.size()
- isPrevRawConnectivityStateReady := found && prevSCData.rawConnectivityState == connectivity.Ready
- b.addressList.updateAddrs(newAddrs)
-
- // If the previous ready SubConn exists in new address list,
- // keep this connection and don't create new SubConns.
- if isPrevRawConnectivityStateReady && b.addressList.seekTo(prevAddr) {
- return nil
- }
-
- b.reconcileSubConnsLocked(newAddrs)
- // If it's the first resolver update or the balancer was already READY
- // (but the new address list does not contain the ready SubConn) or
- // CONNECTING, enter CONNECTING.
- // We may be in TRANSIENT_FAILURE due to a previous empty address list,
- // we should still enter CONNECTING because the sticky TF behaviour
- // mentioned in A62 applies only when the TRANSIENT_FAILURE is reported
- // due to connectivity failures.
- if isPrevRawConnectivityStateReady || b.state == connectivity.Connecting || prevAddrsCount == 0 {
- // Start connection attempt at first address.
- b.forceUpdateConcludedStateLocked(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.startFirstPassLocked()
- } else if b.state == connectivity.TransientFailure {
- // If we're in TRANSIENT_FAILURE, we stay in TRANSIENT_FAILURE until
- // we're READY. See A62.
- b.startFirstPassLocked()
- }
- return nil
-}
-
-// UpdateSubConnState is unused as a StateListener is always registered when
-// creating SubConns.
-func (b *pickfirstBalancer) UpdateSubConnState(subConn balancer.SubConn, state balancer.SubConnState) {
- b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", subConn, state)
-}
-
-func (b *pickfirstBalancer) Close() {
- b.mu.Lock()
- defer b.mu.Unlock()
- b.closeSubConnsLocked()
- b.cancelConnectionTimer()
- b.state = connectivity.Shutdown
-}
-
-// ExitIdle moves the balancer out of idle state. It can be called concurrently
-// by the idlePicker and clientConn so access to variables should be
-// synchronized.
-func (b *pickfirstBalancer) ExitIdle() {
- b.mu.Lock()
- defer b.mu.Unlock()
- if b.state == connectivity.Idle {
- // Move the balancer into CONNECTING state immediately. This is done to
- // avoid staying in IDLE if a resolver update arrives before the first
- // SubConn reports CONNECTING.
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- b.startFirstPassLocked()
- }
-}
-
-func (b *pickfirstBalancer) startFirstPassLocked() {
- b.firstPass = true
- b.numTF = 0
- // Reset the connection attempt record for existing SubConns.
- for _, sd := range b.subConns.Values() {
- sd.connectionFailedInFirstPass = false
- }
- b.requestConnectionLocked()
-}
-
-func (b *pickfirstBalancer) closeSubConnsLocked() {
- for _, sd := range b.subConns.Values() {
- sd.subConn.Shutdown()
- }
- b.subConns = resolver.NewAddressMapV2[*scData]()
-}
-
-// deDupAddresses ensures that each address appears only once in the slice.
-func deDupAddresses(addrs []resolver.Address) []resolver.Address {
- seenAddrs := resolver.NewAddressMapV2[*scData]()
- retAddrs := []resolver.Address{}
-
- for _, addr := range addrs {
- if _, ok := seenAddrs.Get(addr); ok {
- continue
- }
- retAddrs = append(retAddrs, addr)
- }
- return retAddrs
-}
-
-// interleaveAddresses interleaves addresses of both families (IPv4 and IPv6)
-// as per RFC-8305 section 4.
-// Whichever address family is first in the list is followed by an address of
-// the other address family; that is, if the first address in the list is IPv6,
-// then the first IPv4 address should be moved up in the list to be second in
-// the list. It doesn't support configuring "First Address Family Count", i.e.
-// there will always be a single member of the first address family at the
-// beginning of the interleaved list.
-// Addresses that are neither IPv4 nor IPv6 are treated as part of a third
-// "unknown" family for interleaving.
-// See: https://datatracker.ietf.org/doc/html/rfc8305#autoid-6
-func interleaveAddresses(addrs []resolver.Address) []resolver.Address {
- familyAddrsMap := map[ipAddrFamily][]resolver.Address{}
- interleavingOrder := []ipAddrFamily{}
- for _, addr := range addrs {
- family := addressFamily(addr.Addr)
- if _, found := familyAddrsMap[family]; !found {
- interleavingOrder = append(interleavingOrder, family)
- }
- familyAddrsMap[family] = append(familyAddrsMap[family], addr)
- }
-
- interleavedAddrs := make([]resolver.Address, 0, len(addrs))
-
- for curFamilyIdx := 0; len(interleavedAddrs) < len(addrs); curFamilyIdx = (curFamilyIdx + 1) % len(interleavingOrder) {
- // Some IP types may have fewer addresses than others, so we look for
- // the next type that has a remaining member to add to the interleaved
- // list.
- family := interleavingOrder[curFamilyIdx]
- remainingMembers := familyAddrsMap[family]
- if len(remainingMembers) > 0 {
- interleavedAddrs = append(interleavedAddrs, remainingMembers[0])
- familyAddrsMap[family] = remainingMembers[1:]
- }
- }
-
- return interleavedAddrs
-}
-
-// addressFamily returns the ipAddrFamily after parsing the address string.
-// If the address isn't of the format "ip-address:port", it returns
-// ipAddrFamilyUnknown. The address may be valid even if it's not an IP when
-// using a resolver like passthrough where the address may be a hostname in
-// some format that the dialer can resolve.
-func addressFamily(address string) ipAddrFamily {
- // Parse the IP after removing the port.
- host, _, err := net.SplitHostPort(address)
- if err != nil {
- return ipAddrFamilyUnknown
- }
- ip, err := netip.ParseAddr(host)
- if err != nil {
- return ipAddrFamilyUnknown
- }
- switch {
- case ip.Is4() || ip.Is4In6():
- return ipAddrFamilyV4
- case ip.Is6():
- return ipAddrFamilyV6
- default:
- return ipAddrFamilyUnknown
- }
-}
-
-// reconcileSubConnsLocked updates the active subchannels based on a new address
-// list from the resolver. It does this by:
-// - closing subchannels: any existing subchannels associated with addresses
-// that are no longer in the updated list are shut down.
-// - removing subchannels: entries for these closed subchannels are removed
-// from the subchannel map.
-//
-// This ensures that the subchannel map accurately reflects the current set of
-// addresses received from the name resolver.
-func (b *pickfirstBalancer) reconcileSubConnsLocked(newAddrs []resolver.Address) {
- newAddrsMap := resolver.NewAddressMapV2[bool]()
- for _, addr := range newAddrs {
- newAddrsMap.Set(addr, true)
- }
-
- for _, oldAddr := range b.subConns.Keys() {
- if _, ok := newAddrsMap.Get(oldAddr); ok {
- continue
- }
- val, _ := b.subConns.Get(oldAddr)
- val.subConn.Shutdown()
- b.subConns.Delete(oldAddr)
- }
-}
-
-// shutdownRemainingLocked shuts down remaining subConns. Called when a subConn
-// becomes ready, which means that all other subConn must be shutdown.
-func (b *pickfirstBalancer) shutdownRemainingLocked(selected *scData) {
- b.cancelConnectionTimer()
- for _, sd := range b.subConns.Values() {
- if sd.subConn != selected.subConn {
- sd.subConn.Shutdown()
- }
- }
- b.subConns = resolver.NewAddressMapV2[*scData]()
- b.subConns.Set(selected.addr, selected)
-}
-
-// requestConnectionLocked starts connecting on the subchannel corresponding to
-// the current address. If no subchannel exists, one is created. If the current
-// subchannel is in TransientFailure, a connection to the next address is
-// attempted until a subchannel is found.
-func (b *pickfirstBalancer) requestConnectionLocked() {
- if !b.addressList.isValid() {
- return
- }
- var lastErr error
- for valid := true; valid; valid = b.addressList.increment() {
- curAddr := b.addressList.currentAddress()
- sd, ok := b.subConns.Get(curAddr)
- if !ok {
- var err error
- // We want to assign the new scData to sd from the outer scope,
- // hence we can't use := below.
- sd, err = b.newSCData(curAddr)
- if err != nil {
- // This should never happen, unless the clientConn is being shut
- // down.
- if b.logger.V(2) {
- b.logger.Infof("Failed to create a subConn for address %v: %v", curAddr.String(), err)
- }
- // Do nothing, the LB policy will be closed soon.
- return
- }
- b.subConns.Set(curAddr, sd)
- }
-
- switch sd.rawConnectivityState {
- case connectivity.Idle:
- sd.subConn.Connect()
- b.scheduleNextConnectionLocked()
- return
- case connectivity.TransientFailure:
- // The SubConn is being re-used and failed during a previous pass
- // over the addressList. It has not completed backoff yet.
- // Mark it as having failed and try the next address.
- sd.connectionFailedInFirstPass = true
- lastErr = sd.lastErr
- continue
- case connectivity.Connecting:
- // Wait for the connection attempt to complete or the timer to fire
- // before attempting the next address.
- b.scheduleNextConnectionLocked()
- return
- default:
- b.logger.Errorf("SubConn with unexpected state %v present in SubConns map.", sd.rawConnectivityState)
- return
-
- }
- }
-
- // All the remaining addresses in the list are in TRANSIENT_FAILURE, end the
- // first pass if possible.
- b.endFirstPassIfPossibleLocked(lastErr)
-}
-
-func (b *pickfirstBalancer) scheduleNextConnectionLocked() {
- b.cancelConnectionTimer()
- if !b.addressList.hasNext() {
- return
- }
- curAddr := b.addressList.currentAddress()
- cancelled := false // Access to this is protected by the balancer's mutex.
- closeFn := internal.TimeAfterFunc(connectionDelayInterval, func() {
- b.mu.Lock()
- defer b.mu.Unlock()
- // If the scheduled task is cancelled while acquiring the mutex, return.
- if cancelled {
- return
- }
- if b.logger.V(2) {
- b.logger.Infof("Happy Eyeballs timer expired while waiting for connection to %q.", curAddr.Addr)
- }
- if b.addressList.increment() {
- b.requestConnectionLocked()
- }
- })
- // Access to the cancellation callback held by the balancer is guarded by
- // the balancer's mutex, so it's safe to set the boolean from the callback.
- b.cancelConnectionTimer = sync.OnceFunc(func() {
- cancelled = true
- closeFn()
- })
-}
-
-func (b *pickfirstBalancer) updateSubConnState(sd *scData, newState balancer.SubConnState) {
- b.mu.Lock()
- defer b.mu.Unlock()
- oldState := sd.rawConnectivityState
- sd.rawConnectivityState = newState.ConnectivityState
- // Previously relevant SubConns can still callback with state updates.
- // To prevent pickers from returning these obsolete SubConns, this logic
- // is included to check if the current list of active SubConns includes this
- // SubConn.
- if !b.isActiveSCData(sd) {
- return
- }
- if newState.ConnectivityState == connectivity.Shutdown {
- sd.effectiveState = connectivity.Shutdown
- return
- }
-
- // Record a connection attempt when exiting CONNECTING.
- if newState.ConnectivityState == connectivity.TransientFailure {
- sd.connectionFailedInFirstPass = true
- connectionAttemptsFailedMetric.Record(b.metricsRecorder, 1, b.target)
- }
-
- if newState.ConnectivityState == connectivity.Ready {
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
- b.shutdownRemainingLocked(sd)
- if !b.addressList.seekTo(sd.addr) {
- // This should not fail as we should have only one SubConn after
- // entering READY. The SubConn should be present in the addressList.
- b.logger.Errorf("Address %q not found address list in %v", sd.addr, b.addressList.addresses)
- return
- }
- if !b.healthCheckingEnabled {
- if b.logger.V(2) {
- b.logger.Infof("SubConn %p reported connectivity state READY and the health listener is disabled. Transitioning SubConn to READY.", sd.subConn)
- }
-
- sd.effectiveState = connectivity.Ready
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Ready,
- Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
- })
- return
- }
- if b.logger.V(2) {
- b.logger.Infof("SubConn %p reported connectivity state READY. Registering health listener.", sd.subConn)
- }
- // Send a CONNECTING update to take the SubConn out of sticky-TF if
- // required.
- sd.effectiveState = connectivity.Connecting
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- sd.subConn.RegisterHealthListener(func(scs balancer.SubConnState) {
- b.updateSubConnHealthState(sd, scs)
- })
- return
- }
-
- // If the LB policy is READY, and it receives a subchannel state change,
- // it means that the READY subchannel has failed.
- // A SubConn can also transition from CONNECTING directly to IDLE when
- // a transport is successfully created, but the connection fails
- // before the SubConn can send the notification for READY. We treat
- // this as a successful connection and transition to IDLE.
- // TODO: https://github.com/grpc/grpc-go/issues/7862 - Remove the second
- // part of the if condition below once the issue is fixed.
- if oldState == connectivity.Ready || (oldState == connectivity.Connecting && newState.ConnectivityState == connectivity.Idle) {
- // Once a transport fails, the balancer enters IDLE and starts from
- // the first address when the picker is used.
- b.shutdownRemainingLocked(sd)
- sd.effectiveState = newState.ConnectivityState
- // READY SubConn interspliced in between CONNECTING and IDLE, need to
- // account for that.
- if oldState == connectivity.Connecting {
- // A known issue (https://github.com/grpc/grpc-go/issues/7862)
- // causes a race that prevents the READY state change notification.
- // This works around it.
- connectionAttemptsSucceededMetric.Record(b.metricsRecorder, 1, b.target)
- }
- disconnectionsMetric.Record(b.metricsRecorder, 1, b.target)
- b.addressList.reset()
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Idle,
- Picker: &idlePicker{exitIdle: sync.OnceFunc(b.ExitIdle)},
- })
- return
- }
-
- if b.firstPass {
- switch newState.ConnectivityState {
- case connectivity.Connecting:
- // The effective state can be in either IDLE, CONNECTING or
- // TRANSIENT_FAILURE. If it's TRANSIENT_FAILURE, stay in
- // TRANSIENT_FAILURE until it's READY. See A62.
- if sd.effectiveState != connectivity.TransientFailure {
- sd.effectiveState = connectivity.Connecting
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- }
- case connectivity.TransientFailure:
- sd.lastErr = newState.ConnectionError
- sd.effectiveState = connectivity.TransientFailure
- // Since we're re-using common SubConns while handling resolver
- // updates, we could receive an out of turn TRANSIENT_FAILURE from
- // a pass over the previous address list. Happy Eyeballs will also
- // cause out of order updates to arrive.
-
- if curAddr := b.addressList.currentAddress(); equalAddressIgnoringBalAttributes(&curAddr, &sd.addr) {
- b.cancelConnectionTimer()
- if b.addressList.increment() {
- b.requestConnectionLocked()
- return
- }
- }
-
- // End the first pass if we've seen a TRANSIENT_FAILURE from all
- // SubConns once.
- b.endFirstPassIfPossibleLocked(newState.ConnectionError)
- }
- return
- }
-
- // We have finished the first pass, keep re-connecting failing SubConns.
- switch newState.ConnectivityState {
- case connectivity.TransientFailure:
- b.numTF = (b.numTF + 1) % b.subConns.Len()
- sd.lastErr = newState.ConnectionError
- if b.numTF%b.subConns.Len() == 0 {
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: newState.ConnectionError},
- })
- }
- // We don't need to request re-resolution since the SubConn already
- // does that before reporting TRANSIENT_FAILURE.
- // TODO: #7534 - Move re-resolution requests from SubConn into
- // pick_first.
- case connectivity.Idle:
- sd.subConn.Connect()
- }
-}
-
-// endFirstPassIfPossibleLocked ends the first happy-eyeballs pass if all the
-// addresses are tried and their SubConns have reported a failure.
-func (b *pickfirstBalancer) endFirstPassIfPossibleLocked(lastErr error) {
- // An optimization to avoid iterating over the entire SubConn map.
- if b.addressList.isValid() {
- return
- }
- // Connect() has been called on all the SubConns. The first pass can be
- // ended if all the SubConns have reported a failure.
- for _, sd := range b.subConns.Values() {
- if !sd.connectionFailedInFirstPass {
- return
- }
- }
- b.firstPass = false
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: lastErr},
- })
- // Start re-connecting all the SubConns that are already in IDLE.
- for _, sd := range b.subConns.Values() {
- if sd.rawConnectivityState == connectivity.Idle {
- sd.subConn.Connect()
- }
- }
-}
-
-func (b *pickfirstBalancer) isActiveSCData(sd *scData) bool {
- activeSD, found := b.subConns.Get(sd.addr)
- return found && activeSD == sd
-}
-
-func (b *pickfirstBalancer) updateSubConnHealthState(sd *scData, state balancer.SubConnState) {
- b.mu.Lock()
- defer b.mu.Unlock()
- // Previously relevant SubConns can still callback with state updates.
- // To prevent pickers from returning these obsolete SubConns, this logic
- // is included to check if the current list of active SubConns includes
- // this SubConn.
- if !b.isActiveSCData(sd) {
- return
- }
- sd.effectiveState = state.ConnectivityState
- switch state.ConnectivityState {
- case connectivity.Ready:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Ready,
- Picker: &picker{result: balancer.PickResult{SubConn: sd.subConn}},
- })
- case connectivity.TransientFailure:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.TransientFailure,
- Picker: &picker{err: fmt.Errorf("pickfirst: health check failure: %v", state.ConnectionError)},
- })
- case connectivity.Connecting:
- b.updateBalancerState(balancer.State{
- ConnectivityState: connectivity.Connecting,
- Picker: &picker{err: balancer.ErrNoSubConnAvailable},
- })
- default:
- b.logger.Errorf("Got unexpected health update for SubConn %p: %v", state)
- }
-}
-
-// updateBalancerState stores the state reported to the channel and calls
-// ClientConn.UpdateState(). As an optimization, it avoids sending duplicate
-// updates to the channel.
-func (b *pickfirstBalancer) updateBalancerState(newState balancer.State) {
- // In case of TransientFailures allow the picker to be updated to update
- // the connectivity error, in all other cases don't send duplicate state
- // updates.
- if newState.ConnectivityState == b.state && b.state != connectivity.TransientFailure {
- return
- }
- b.forceUpdateConcludedStateLocked(newState)
-}
-
-// forceUpdateConcludedStateLocked stores the state reported to the channel and
-// calls ClientConn.UpdateState().
-// A separate function is defined to force update the ClientConn state since the
-// channel doesn't correctly assume that LB policies start in CONNECTING and
-// relies on LB policy to send an initial CONNECTING update.
-func (b *pickfirstBalancer) forceUpdateConcludedStateLocked(newState balancer.State) {
- b.state = newState.ConnectivityState
- b.cc.UpdateState(newState)
-}
-
-type picker struct {
- result balancer.PickResult
- err error
-}
-
-func (p *picker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- return p.result, p.err
-}
-
-// idlePicker is used when the SubConn is IDLE and kicks the SubConn into
-// CONNECTING when Pick is called.
-type idlePicker struct {
- exitIdle func()
-}
-
-func (i *idlePicker) Pick(balancer.PickInfo) (balancer.PickResult, error) {
- i.exitIdle()
- return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
-}
-
-// addressList manages sequentially iterating over addresses present in a list
-// of endpoints. It provides a 1 dimensional view of the addresses present in
-// the endpoints.
-// This type is not safe for concurrent access.
-type addressList struct {
- addresses []resolver.Address
- idx int
-}
-
-func (al *addressList) isValid() bool {
- return al.idx < len(al.addresses)
-}
-
-func (al *addressList) size() int {
- return len(al.addresses)
-}
-
-// increment moves to the next index in the address list.
-// This method returns false if it went off the list, true otherwise.
-func (al *addressList) increment() bool {
- if !al.isValid() {
- return false
- }
- al.idx++
- return al.idx < len(al.addresses)
-}
-
-// currentAddress returns the current address pointed to in the addressList.
-// If the list is in an invalid state, it returns an empty address instead.
-func (al *addressList) currentAddress() resolver.Address {
- if !al.isValid() {
- return resolver.Address{}
- }
- return al.addresses[al.idx]
-}
-
-func (al *addressList) reset() {
- al.idx = 0
-}
-
-func (al *addressList) updateAddrs(addrs []resolver.Address) {
- al.addresses = addrs
- al.reset()
-}
-
-// seekTo returns false if the needle was not found and the current index was
-// left unchanged.
-func (al *addressList) seekTo(needle resolver.Address) bool {
- for ai, addr := range al.addresses {
- if !equalAddressIgnoringBalAttributes(&addr, &needle) {
- continue
- }
- al.idx = ai
- return true
- }
- return false
-}
-
-// hasNext returns whether incrementing the addressList will result in moving
-// past the end of the list. If the list has already moved past the end, it
-// returns false.
-func (al *addressList) hasNext() bool {
- if !al.isValid() {
- return false
- }
- return al.idx+1 < len(al.addresses)
-}
-
-// equalAddressIgnoringBalAttributes returns true is a and b are considered
-// equal. This is different from the Equal method on the resolver.Address type
-// which considers all fields to determine equality. Here, we only consider
-// fields that are meaningful to the SubConn.
-func equalAddressIgnoringBalAttributes(a, b *resolver.Address) bool {
- return a.Addr == b.Addr && a.ServerName == b.ServerName &&
- a.Attributes.Equal(b.Attributes)
-}
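Context for the removal above: this file held the former pick-first "leaf" balancer, whose Happy Eyeballs pass orders resolved addresses so that connection attempts alternate between IPv4 and IPv6 instead of exhausting one family first. The sketch below illustrates only that interleaving idea; it uses plain host:port strings and made-up helper names rather than resolver.Address or any gRPC API.

```go
package main

import (
	"fmt"
	"net"
	"net/netip"
)

// family classifies a "host:port" string as "v4", "v6", or "unknown",
// mirroring the addressFamily helper in the removed file.
func family(addr string) string {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		return "unknown"
	}
	ip, err := netip.ParseAddr(host)
	if err != nil {
		return "unknown"
	}
	switch {
	case ip.Is4() || ip.Is4In6():
		return "v4"
	case ip.Is6():
		return "v6"
	}
	return "unknown"
}

// interleave alternates between address families, starting with the family of
// the first address, so a long run of one family cannot starve the other.
func interleave(addrs []string) []string {
	var order []string
	byFamily := map[string][]string{}
	for _, a := range addrs {
		f := family(a)
		if _, seen := byFamily[f]; !seen {
			order = append(order, f)
		}
		byFamily[f] = append(byFamily[f], a)
	}
	out := make([]string, 0, len(addrs))
	for i := 0; len(out) < len(addrs); i = (i + 1) % len(order) {
		f := order[i]
		if rest := byFamily[f]; len(rest) > 0 {
			out = append(out, rest[0])
			byFamily[f] = rest[1:]
		}
	}
	return out
}

func main() {
	fmt.Println(interleave([]string{"10.0.0.1:443", "10.0.0.2:443", "[2001:db8::1]:443"}))
	// Output: [10.0.0.1:443 [2001:db8::1]:443 10.0.0.2:443]
}
```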
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index 22045bf39..22e6e3267 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -26,7 +26,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/endpointsharding"
- "google.golang.org/grpc/balancer/pickfirst/pickfirstleaf"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/grpclog"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
)
@@ -47,7 +47,7 @@ func (bb builder) Name() string {
}
func (bb builder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
- childBuilder := balancer.Get(pickfirstleaf.Name).Build
+ childBuilder := balancer.Get(pickfirst.Name).Build
bal := &rrBalancer{
cc: cc,
Balancer: endpointsharding.NewBalancer(cc, opts, childBuilder, endpointsharding.Options{}),
@@ -67,6 +67,6 @@ func (b *rrBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error {
return b.Balancer.UpdateClientConnState(balancer.ClientConnState{
// Enable the health listener in pickfirst children for client side health
// checks and outlier detection, if configured.
- ResolverState: pickfirstleaf.EnableHealthListener(ccs.ResolverState),
+ ResolverState: pickfirst.EnableHealthListener(ccs.ResolverState),
})
}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index 948a21ef6..2c760e623 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -450,13 +450,14 @@ func (acbw *acBalancerWrapper) healthListenerRegFn() func(context.Context, func(
if acbw.ccb.cc.dopts.disableHealthCheck {
return noOpRegisterHealthListenerFn
}
+ cfg := acbw.ac.cc.healthCheckConfig()
+ if cfg == nil {
+ return noOpRegisterHealthListenerFn
+ }
regHealthLisFn := internal.RegisterClientHealthCheckListener
if regHealthLisFn == nil {
// The health package is not imported.
- return noOpRegisterHealthListenerFn
- }
- cfg := acbw.ac.cc.healthCheckConfig()
- if cfg == nil {
+ channelz.Error(logger, acbw.ac.channelz, "Health check is requested but health package is not imported.")
return noOpRegisterHealthListenerFn
}
return func(ctx context.Context, listener func(balancer.SubConnState)) func() {
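The reordered checks above make the no-health-check-config case return the no-op listener first, and a service config that requests health checking while the health package is not linked in now logs a channelz error instead of silently doing nothing. For reference, a minimal client sketch with the two ingredients client-side health checking needs; the target and service name are placeholders:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	_ "google.golang.org/grpc/health" // registers the client-side health checking implementation
)

func main() {
	// Without the blank import above, the healthCheckConfig below would now
	// trigger the "health package is not imported" error added in this change.
	conn, err := grpc.NewClient(
		"dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithDefaultServiceConfig(`{
		  "loadBalancingConfig": [{"round_robin":{}}],
		  "healthCheckConfig": {"serviceName": "example.Service"}
		}`),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```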
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index b1364a032..42c61cf9f 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.6
+// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: grpc/binlog/v1/binarylog.proto
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index a3c315f2d..c0c2c9a76 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -40,11 +40,12 @@ import (
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/internal/idle"
iresolver "google.golang.org/grpc/internal/resolver"
- "google.golang.org/grpc/internal/stats"
+ istats "google.golang.org/grpc/internal/stats"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
+ "google.golang.org/grpc/stats"
"google.golang.org/grpc/status"
_ "google.golang.org/grpc/balancer/roundrobin" // To register roundrobin.
@@ -210,7 +211,8 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper()
- cc.metricsRecorderList = stats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+ cc.metricsRecorderList = istats.NewMetricsRecorderList(cc.dopts.copts.StatsHandlers)
+ cc.statsHandler = istats.NewCombinedHandler(cc.dopts.copts.StatsHandlers...)
cc.initIdleStateLocked() // Safe to call without the lock, since nothing else has a reference to cc.
cc.idlenessMgr = idle.NewManager((*idler)(cc), cc.dopts.idleTimeout)
@@ -621,7 +623,8 @@ type ClientConn struct {
channelz *channelz.Channel // Channelz object.
resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
idlenessMgr *idle.Manager
- metricsRecorderList *stats.MetricsRecorderList
+ metricsRecorderList *istats.MetricsRecorderList
+ statsHandler stats.Handler
// The following provide their own synchronization, and therefore don't
// require cc.mu to be held to access them.
diff --git a/vendor/google.golang.org/grpc/credentials/credentials.go b/vendor/google.golang.org/grpc/credentials/credentials.go
index c8e337cdd..06f6c6c70 100644
--- a/vendor/google.golang.org/grpc/credentials/credentials.go
+++ b/vendor/google.golang.org/grpc/credentials/credentials.go
@@ -44,8 +44,7 @@ type PerRPCCredentials interface {
// A54). uri is the URI of the entry point for the request. When supported
// by the underlying implementation, ctx can be used for timeout and
// cancellation. Additionally, RequestInfo data will be available via ctx
- // to this call. TODO(zhaoq): Define the set of the qualified keys instead
- // of leaving it as an arbitrary string.
+ // to this call.
GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error)
// RequireTransportSecurity indicates whether the credentials requires
// transport security.
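For reference, a toy implementation of the PerRPCCredentials interface documented here; the header name and token value are illustrative only:

```go
package authexample

import (
	"context"

	"google.golang.org/grpc"
)

// staticTokenCreds attaches a fixed bearer token to every RPC.
type staticTokenCreds struct {
	token string
}

// GetRequestMetadata returns the per-RPC metadata to attach; ctx carries
// RequestInfo and can be used for timeout and cancellation, as described above.
func (c *staticTokenCreds) GetRequestMetadata(ctx context.Context, uri ...string) (map[string]string, error) {
	return map[string]string{"authorization": "Bearer " + c.token}, nil
}

// RequireTransportSecurity reports that these credentials may only be sent
// over a secure connection.
func (c *staticTokenCreds) RequireTransportSecurity() bool { return true }

// Usage sketch: pass the credentials as a dial option.
var _ grpc.DialOption = grpc.WithPerRPCCredentials(&staticTokenCreds{token: "example-token"})
```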
diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go
index 11d0ae142..dadd21e40 100644
--- a/vendor/google.golang.org/grpc/encoding/encoding.go
+++ b/vendor/google.golang.org/grpc/encoding/encoding.go
@@ -27,8 +27,10 @@ package encoding
import (
"io"
+ "slices"
"strings"
+ "google.golang.org/grpc/encoding/internal"
"google.golang.org/grpc/internal/grpcutil"
)
@@ -36,6 +38,24 @@ import (
// It is intended for grpc internal use only.
const Identity = "identity"
+func init() {
+ internal.RegisterCompressorForTesting = func(c Compressor) func() {
+ name := c.Name()
+ curCompressor, found := registeredCompressor[name]
+ RegisterCompressor(c)
+ return func() {
+ if found {
+ registeredCompressor[name] = curCompressor
+ return
+ }
+ delete(registeredCompressor, name)
+ grpcutil.RegisteredCompressorNames = slices.DeleteFunc(grpcutil.RegisteredCompressorNames, func(s string) bool {
+ return s == name
+ })
+ }
+ }
+}
+
// Compressor is used for compressing and decompressing when sending or
// receiving messages.
//
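The new init hook only wraps the existing global registration path so tests can undo it. For reference, a toy compressor registered the ordinary way; the "toy-gzip" name is made up, and real code should use the stock google.golang.org/grpc/encoding/gzip package instead:

```go
package gzipexample

import (
	"compress/gzip"
	"io"

	"google.golang.org/grpc/encoding"
)

// gzipCompressor is a minimal encoding.Compressor backed by compress/gzip.
type gzipCompressor struct{}

func (gzipCompressor) Name() string { return "toy-gzip" }

func (gzipCompressor) Compress(w io.Writer) (io.WriteCloser, error) {
	return gzip.NewWriter(w), nil
}

func (gzipCompressor) Decompress(r io.Reader) (io.Reader, error) {
	return gzip.NewReader(r)
}

func init() {
	// Global registration; this is the call that the testing hook wraps and
	// later reverses.
	encoding.RegisterCompressor(gzipCompressor{})
}
```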
diff --git a/vendor/google.golang.org/grpc/encoding/internal/internal.go b/vendor/google.golang.org/grpc/encoding/internal/internal.go
new file mode 100644
index 000000000..ee9acb437
--- /dev/null
+++ b/vendor/google.golang.org/grpc/encoding/internal/internal.go
@@ -0,0 +1,28 @@
+/*
+ *
+ * Copyright 2025 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+// Package internal contains code internal to the encoding package.
+package internal
+
+// RegisterCompressorForTesting registers a compressor in the global compressor
+// registry. It returns a cleanup function that should be called at the end
+// of the test to unregister the compressor.
+//
+// This prevents compressors registered in one test from appearing in the
+// encoding headers of subsequent tests.
+var RegisterCompressorForTesting any // func RegisterCompressor(c Compressor) func()
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
index ad75313a1..2b57ba65a 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metricregistry.go
@@ -75,6 +75,7 @@ const (
MetricTypeIntHisto
MetricTypeFloatHisto
MetricTypeIntGauge
+ MetricTypeIntUpDownCount
)
// Int64CountHandle is a typed handle for a int count metric. This handle
@@ -93,6 +94,23 @@ func (h *Int64CountHandle) Record(recorder MetricsRecorder, incr int64, labels .
recorder.RecordInt64Count(h, incr, labels...)
}
+// Int64UpDownCountHandle is a typed handle for an int up-down counter metric.
+// This handle is passed at the recording point in order to know which metric
+// to record on.
+type Int64UpDownCountHandle MetricDescriptor
+
+// Descriptor returns the int64 up-down counter handle typecast to a pointer to a
+// MetricDescriptor.
+func (h *Int64UpDownCountHandle) Descriptor() *MetricDescriptor {
+ return (*MetricDescriptor)(h)
+}
+
+// Record records the int64 up-down counter value on the metrics recorder provided.
+// The value 'v' can be positive to increment or negative to decrement.
+func (h *Int64UpDownCountHandle) Record(recorder MetricsRecorder, v int64, labels ...string) {
+ recorder.RecordInt64UpDownCount(h, v, labels...)
+}
+
// Float64CountHandle is a typed handle for a float count metric. This handle is
// passed at the recording point in order to know which metric to record on.
type Float64CountHandle MetricDescriptor
@@ -249,6 +267,21 @@ func RegisterInt64Gauge(descriptor MetricDescriptor) *Int64GaugeHandle {
return (*Int64GaugeHandle)(descPtr)
}
+// RegisterInt64UpDownCount registers the metric description onto the global registry.
+// It returns a typed handle to use for recording data.
+//
+// NOTE: this function must only be called during initialization time (i.e. in
+// an init() function), and is not thread-safe. If multiple metrics are
+// registered with the same name, this function will panic.
+func RegisterInt64UpDownCount(descriptor MetricDescriptor) *Int64UpDownCountHandle {
+ registerMetric(descriptor.Name, descriptor.Default)
+ // Set the specific metric type for the up-down counter
+ descriptor.Type = MetricTypeIntUpDownCount
+ descPtr := &descriptor
+ metricsRegistry[descriptor.Name] = descPtr
+ return (*Int64UpDownCountHandle)(descPtr)
+}
+
// snapshotMetricsRegistryForTesting snapshots the global data of the metrics
// registry. Returns a cleanup function that sets the metrics registry to its
// original state.
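A sketch of how the new up-down counter registration is meant to be used, following the pattern of the other Register* helpers; the metric name, unit, and labels below are illustrative and not metrics that gRPC defines:

```go
package updownexample

import (
	estats "google.golang.org/grpc/experimental/stats"
)

// activeConnsMetric is a hypothetical up-down counter registered at init time.
var activeConnsMetric = estats.RegisterInt64UpDownCount(estats.MetricDescriptor{
	Name:        "example.active_connections",
	Description: "Number of currently open example connections.",
	Unit:        "{connection}",
	Labels:      []string{"grpc.target"},
	Default:     false,
})

// Recording then goes through whichever MetricsRecorder is in effect:
//
//	activeConnsMetric.Record(recorder, 1, target)  // connection opened
//	activeConnsMetric.Record(recorder, -1, target) // connection closed
```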
diff --git a/vendor/google.golang.org/grpc/experimental/stats/metrics.go b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
index ee1423605..cb57f1a74 100644
--- a/vendor/google.golang.org/grpc/experimental/stats/metrics.go
+++ b/vendor/google.golang.org/grpc/experimental/stats/metrics.go
@@ -38,6 +38,9 @@ type MetricsRecorder interface {
// RecordInt64Gauge records the measurement alongside labels on the int
// gauge associated with the provided handle.
RecordInt64Gauge(handle *Int64GaugeHandle, incr int64, labels ...string)
+ // RecordInt64UpDownCount records the measurement alongside labels on the int
+ // up-down counter associated with the provided handle.
+ RecordInt64UpDownCount(handle *Int64UpDownCountHandle, incr int64, labels ...string)
}
// Metrics is an experimental legacy alias of the now-stable stats.MetricSet.
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 22d263fb9..8f7d9f6bb 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.36.6
+// protoc-gen-go v1.36.10
// protoc v5.27.1
// source: grpc/health/v1/health.proto
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 7e060f5ed..91f760936 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -52,12 +52,6 @@ var (
// or "false".
EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", true)
- // NewPickFirstEnabled is set if the new pickfirst leaf policy is to be used
- // instead of the exiting pickfirst implementation. This can be disabled by
- // setting the environment variable "GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST"
- // to "false".
- NewPickFirstEnabled = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_NEW_PICK_FIRST", true)
-
// XDSEndpointHashKeyBackwardCompat controls the parsing of the endpoint hash
// key from EDS LbEndpoint metadata. Endpoint hash keys can be disabled by
// setting "GRPC_XDS_ENDPOINT_HASH_KEY_BACKWARD_COMPAT" to "true". When the
@@ -75,6 +69,14 @@ var (
// ALTSHandshakerKeepaliveParams is set if we should add the
// KeepaliveParams when dial the ALTS handshaker service.
ALTSHandshakerKeepaliveParams = boolFromEnv("GRPC_EXPERIMENTAL_ALTS_HANDSHAKER_KEEPALIVE_PARAMS", false)
+
+ // EnableDefaultPortForProxyTarget controls whether the resolver adds a default port 443
+ // to a target address that lacks one. This flag only has an effect when all of
+ // the following conditions are met:
+ // - A connect proxy is being used.
+ // - Target resolution is disabled.
+ // - The DNS resolver is being used.
+ EnableDefaultPortForProxyTarget = boolFromEnv("GRPC_EXPERIMENTAL_ENABLE_DEFAULT_PORT_FOR_PROXY_TARGET", true)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
index b1f883bca..7685d08b5 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go
@@ -74,4 +74,9 @@ var (
// For more details, see:
// https://github.com/grpc/proposal/blob/master/A86-xds-http-connect.md
XDSHTTPConnectEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_HTTP_CONNECT", false)
+
+ // XDSBootstrapCallCredsEnabled controls if call credentials can be used in
+ // xDS bootstrap configuration via the `call_creds` field. For more details,
+ // see: https://github.com/grpc/proposal/blob/master/A97-xds-jwt-call-creds.md
+ XDSBootstrapCallCredsEnabled = boolFromEnv("GRPC_EXPERIMENTAL_XDS_BOOTSTRAP_CALL_CREDS", false)
)
diff --git a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
index 20b8fb098..5bfa67b72 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/delegatingresolver/delegatingresolver.go
@@ -22,11 +22,13 @@ package delegatingresolver
import (
"fmt"
+ "net"
"net/http"
"net/url"
"sync"
"google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal/envconfig"
"google.golang.org/grpc/internal/proxyattributes"
"google.golang.org/grpc/internal/transport"
"google.golang.org/grpc/internal/transport/networktype"
@@ -40,6 +42,8 @@ var (
HTTPSProxyFromEnvironment = http.ProxyFromEnvironment
)
+const defaultPort = "443"
+
// delegatingResolver manages both target URI and proxy address resolution by
// delegating these tasks to separate child resolvers. Essentially, it acts as
// an intermediary between the gRPC ClientConn and the child resolvers.
@@ -107,10 +111,18 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
targetResolver: nopResolver{},
}
+ addr := target.Endpoint()
var err error
- r.proxyURL, err = proxyURLForTarget(target.Endpoint())
+ if target.URL.Scheme == "dns" && !targetResolutionEnabled && envconfig.EnableDefaultPortForProxyTarget {
+ addr, err = parseTarget(addr)
+ if err != nil {
+ return nil, fmt.Errorf("delegating_resolver: invalid target address %q: %v", target.Endpoint(), err)
+ }
+ }
+
+ r.proxyURL, err = proxyURLForTarget(addr)
if err != nil {
- return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %s: %v", target, err)
+ return nil, fmt.Errorf("delegating_resolver: failed to determine proxy URL for target %q: %v", target, err)
}
// proxy is not configured or proxy address excluded using `NO_PROXY` env
@@ -132,8 +144,8 @@ func New(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOpti
// bypass the target resolver and store the unresolved target address.
if target.URL.Scheme == "dns" && !targetResolutionEnabled {
r.targetResolverState = &resolver.State{
- Addresses: []resolver.Address{{Addr: target.Endpoint()}},
- Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: target.Endpoint()}}}},
+ Addresses: []resolver.Address{{Addr: addr}},
+ Endpoints: []resolver.Endpoint{{Addresses: []resolver.Address{{Addr: addr}}}},
}
r.updateTargetResolverState(*r.targetResolverState)
return r, nil
@@ -202,6 +214,44 @@ func needsProxyResolver(state *resolver.State) bool {
return false
}
+// parseTarget takes a target string and ensures it is a valid "host:port" target.
+//
+// It does the following:
+// 1. If the target already has a port (e.g., "host:port", "[ipv6]:port"),
+// it is returned as is.
+// 2. If the host part is empty (e.g., ":80"), it defaults to "localhost",
+// returning "localhost:80".
+// 3. If the target is missing a port (e.g., "host", "ipv6"), the defaultPort
+// is added.
+//
+// An error is returned for empty targets or targets with a trailing colon
+// but no port (e.g., "host:").
+func parseTarget(target string) (string, error) {
+ if target == "" {
+ return "", fmt.Errorf("missing address")
+ }
+
+ host, port, err := net.SplitHostPort(target)
+ if err != nil {
+ // If SplitHostPort fails, it's likely because the port is missing.
+ // We append the default port and return the result.
+ return net.JoinHostPort(target, defaultPort), nil
+ }
+
+ // If SplitHostPort succeeds, we check for edge cases.
+ if port == "" {
+ // A success with an empty port means the target had a trailing colon,
+ // e.g., "host:", which is an error.
+ return "", fmt.Errorf("missing port after port-separator colon")
+ }
+ if host == "" {
+ // A success with an empty host means the target was like ":80".
+ // We default the host to "localhost".
+ host = "localhost"
+ }
+ return net.JoinHostPort(host, port), nil
+}
+
func skipProxy(address resolver.Address) bool {
// Avoid proxy when network is not tcp.
networkType, ok := networktype.Get(address)
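To make the three documented cases concrete, here is an illustrative re-implementation of the same host:port normalization on a few sample inputs; it mirrors, but is not, the internal parseTarget function:

```go
package main

import (
	"fmt"
	"net"
)

// ensureHostPort adds ":443" when the port is missing, defaults the host to
// "localhost" when only a port is given, and rejects a bare trailing colon.
func ensureHostPort(target string) (string, error) {
	if target == "" {
		return "", fmt.Errorf("missing address")
	}
	host, port, err := net.SplitHostPort(target)
	if err != nil {
		// Most likely the port is missing: append the default.
		return net.JoinHostPort(target, "443"), nil
	}
	if port == "" {
		return "", fmt.Errorf("missing port after port-separator colon")
	}
	if host == "" {
		host = "localhost"
	}
	return net.JoinHostPort(host, port), nil
}

func main() {
	for _, tgt := range []string{"example.com", "example.com:8080", ":80", "2001:db8::1"} {
		out, _ := ensureHostPort(tgt)
		fmt.Println(tgt, "->", out)
	}
	// example.com      -> example.com:443
	// example.com:8080 -> example.com:8080
	// :80              -> localhost:80
	// 2001:db8::1      -> [2001:db8::1]:443
}
```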
diff --git a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
index 79044657b..d5f7e4d62 100644
--- a/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
+++ b/vendor/google.golang.org/grpc/internal/stats/metrics_recorder_list.go
@@ -64,6 +64,16 @@ func (l *MetricsRecorderList) RecordInt64Count(handle *estats.Int64CountHandle,
}
}
+// RecordInt64UpDownCount records the measurement alongside labels on the int
+// up-down counter associated with the provided handle.
+func (l *MetricsRecorderList) RecordInt64UpDownCount(handle *estats.Int64UpDownCountHandle, incr int64, labels ...string) {
+ verifyLabels(handle.Descriptor(), labels...)
+
+ for _, metricRecorder := range l.metricsRecorders {
+ metricRecorder.RecordInt64UpDownCount(handle, incr, labels...)
+ }
+}
+
// RecordFloat64Count records the measurement alongside labels on the float
// count associated with the provided handle.
func (l *MetricsRecorderList) RecordFloat64Count(handle *estats.Float64CountHandle, incr float64, labels ...string) {
diff --git a/vendor/google.golang.org/grpc/internal/stats/stats.go b/vendor/google.golang.org/grpc/internal/stats/stats.go
new file mode 100644
index 000000000..49019b80d
--- /dev/null
+++ b/vendor/google.golang.org/grpc/internal/stats/stats.go
@@ -0,0 +1,70 @@
+/*
+ *
+ * Copyright 2025 gRPC authors.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package stats
+
+import (
+ "context"
+
+ "google.golang.org/grpc/stats"
+)
+
+type combinedHandler struct {
+ handlers []stats.Handler
+}
+
+// NewCombinedHandler combines multiple stats.Handlers into a single handler.
+//
+// It returns nil if no handlers are provided. If only one handler is
+// provided, it is returned directly without wrapping.
+func NewCombinedHandler(handlers ...stats.Handler) stats.Handler {
+ switch len(handlers) {
+ case 0:
+ return nil
+ case 1:
+ return handlers[0]
+ default:
+ return &combinedHandler{handlers: handlers}
+ }
+}
+
+func (ch *combinedHandler) TagRPC(ctx context.Context, info *stats.RPCTagInfo) context.Context {
+ for _, h := range ch.handlers {
+ ctx = h.TagRPC(ctx, info)
+ }
+ return ctx
+}
+
+func (ch *combinedHandler) HandleRPC(ctx context.Context, stats stats.RPCStats) {
+ for _, h := range ch.handlers {
+ h.HandleRPC(ctx, stats)
+ }
+}
+
+func (ch *combinedHandler) TagConn(ctx context.Context, info *stats.ConnTagInfo) context.Context {
+ for _, h := range ch.handlers {
+ ctx = h.TagConn(ctx, info)
+ }
+ return ctx
+}
+
+func (ch *combinedHandler) HandleConn(ctx context.Context, stats stats.ConnStats) {
+ for _, h := range ch.handlers {
+ h.HandleConn(ctx, stats)
+ }
+}
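From the caller's point of view nothing changes here: every grpc.WithStatsHandler option still receives all callbacks; the new helper only merges them into a single internal fan-out handler. A minimal sketch with two toy handlers and a placeholder target:

```go
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
	"google.golang.org/grpc/stats"
)

// logHandler is a minimal stats.Handler that logs the type of each event.
type logHandler struct{ name string }

func (h *logHandler) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { return ctx }
func (h *logHandler) HandleRPC(_ context.Context, s stats.RPCStats) {
	log.Printf("%s: RPC event %T", h.name, s)
}
func (h *logHandler) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { return ctx }
func (h *logHandler) HandleConn(_ context.Context, s stats.ConnStats) {
	log.Printf("%s: conn event %T", h.name, s)
}

func main() {
	// Both handlers end up behind the combined handler; each still sees every
	// Tag/Handle callback.
	conn, err := grpc.NewClient("dns:///example.internal:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()),
		grpc.WithStatsHandler(&logHandler{name: "metrics"}),
		grpc.WithStatsHandler(&logHandler{name: "tracing"}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```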
diff --git a/vendor/google.golang.org/grpc/internal/transport/client_stream.go b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
index ccc0e017e..980452519 100644
--- a/vendor/google.golang.org/grpc/internal/transport/client_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/client_stream.go
@@ -29,25 +29,27 @@ import (
// ClientStream implements streaming functionality for a gRPC client.
type ClientStream struct {
- *Stream // Embed for common stream functionality.
+ Stream // Embed for common stream functionality.
ct *http2Client
done chan struct{} // closed at the end of stream to unblock writers.
doneFunc func() // invoked at the end of stream.
- headerChan chan struct{} // closed to indicate the end of header metadata.
- headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ headerChan chan struct{} // closed to indicate the end of header metadata.
+ header metadata.MD // the received header metadata
+
+ status *status.Status // the status error received from the server
+
+ // Non-pointer fields are at the end to optimize GC allocations.
+
// headerValid indicates whether a valid header was received. Only
// meaningful after headerChan is closed (always call waitOnHeader() before
// reading its value).
- headerValid bool
- header metadata.MD // the received header metadata
- noHeaders bool // set if the client never received headers (set only after the stream is done).
-
- bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
- unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
-
- status *status.Status // the status error received from the server
+ headerValid bool
+ noHeaders bool // set if the client never received headers (set only after the stream is done).
+ headerChanClosed uint32 // set when headerChan is closed. Used to avoid closing headerChan multiple times.
+ bytesReceived atomic.Bool // indicates whether any bytes have been received on this stream
+ unprocessed atomic.Bool // set if the server sends a refused stream or GOAWAY including this stream
}
// Read reads an n byte message from the input stream.
@@ -142,3 +144,11 @@ func (s *ClientStream) TrailersOnly() bool {
func (s *ClientStream) Status() *status.Status {
return s.status
}
+
+func (s *ClientStream) requestRead(n int) {
+ s.ct.adjustWindow(s, uint32(n))
+}
+
+func (s *ClientStream) updateWindow(n int) {
+ s.ct.updateWindow(s, uint32(n))
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
index a2831e5d0..2dcd1e63b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
+++ b/vendor/google.golang.org/grpc/internal/transport/controlbuf.go
@@ -496,6 +496,16 @@ const (
serverSide
)
+// maxWriteBufSize is the maximum length (number of elements) the cached
+// writeBuf can grow to. The length depends on the number of buffers
+// contained within the BufferSlice produced by the codec, which is
+// generally small.
+//
+// If a writeBuf larger than this limit is required, it will be allocated
+// and freed after use, rather than being cached. This avoids holding
+// on to large amounts of memory.
+const maxWriteBufSize = 64
+
// Loopy receives frames from the control buffer.
// Each frame is handled individually; most of the work done by loopy goes
// into handling data frames. Loopy maintains a queue of active streams, and each
@@ -530,6 +540,8 @@ type loopyWriter struct {
// Side-specific handlers
ssGoAwayHandler func(*goAway) (bool, error)
+
+ writeBuf [][]byte // cached slice to avoid heap allocations for calls to mem.Reader.Peek.
}
func newLoopyWriter(s side, fr *framer, cbuf *controlBuffer, bdpEst *bdpEstimator, conn net.Conn, logger *grpclog.PrefixLogger, goAwayHandler func(*goAway) (bool, error), bufferPool mem.BufferPool) *loopyWriter {
@@ -665,11 +677,10 @@ func (l *loopyWriter) incomingSettingsHandler(s *incomingSettings) error {
func (l *loopyWriter) registerStreamHandler(h *registerStream) {
str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- reader: mem.BufferSlice{}.Reader(),
+ id: h.streamID,
+ state: empty,
+ itl: &itemList{},
+ wq: h.wq,
}
l.estdStreams[h.streamID] = str
}
@@ -701,11 +712,10 @@ func (l *loopyWriter) headerHandler(h *headerFrame) error {
}
// Case 2: Client wants to originate stream.
str := &outStream{
- id: h.streamID,
- state: empty,
- itl: &itemList{},
- wq: h.wq,
- reader: mem.BufferSlice{}.Reader(),
+ id: h.streamID,
+ state: empty,
+ itl: &itemList{},
+ wq: h.wq,
}
return l.originateStream(str, h)
}
@@ -948,11 +958,11 @@ func (l *loopyWriter) processData() (bool, error) {
if str == nil {
return true, nil
}
- reader := str.reader
+ reader := &str.reader
dataItem := str.itl.peek().(*dataFrame) // Peek at the first data item this stream.
if !dataItem.processing {
dataItem.processing = true
- str.reader.Reset(dataItem.data)
+ reader.Reset(dataItem.data)
dataItem.data.Free()
}
// A data item is represented by a dataFrame, since it later translates into
@@ -964,11 +974,11 @@ func (l *loopyWriter) processData() (bool, error) {
if len(dataItem.h) == 0 && reader.Remaining() == 0 { // Empty data frame
// Client sends out empty data frame with endStream = true
- if err := l.framer.fr.WriteData(dataItem.streamID, dataItem.endStream, nil); err != nil {
+ if err := l.framer.writeData(dataItem.streamID, dataItem.endStream, nil); err != nil {
return false, err
}
str.itl.dequeue() // remove the empty data item from stream
- _ = reader.Close()
+ reader.Close()
if str.itl.isEmpty() {
str.state = empty
} else if trailer, ok := str.itl.peek().(*headerFrame); ok { // the next item is trailers.
@@ -1001,25 +1011,20 @@ func (l *loopyWriter) processData() (bool, error) {
remainingBytes := len(dataItem.h) + reader.Remaining() - hSize - dSize
size := hSize + dSize
- var buf *[]byte
-
- if hSize != 0 && dSize == 0 {
- buf = &dataItem.h
- } else {
- // Note: this is only necessary because the http2.Framer does not support
- // partially writing a frame, so the sequence must be materialized into a buffer.
- // TODO: Revisit once https://github.com/golang/go/issues/66655 is addressed.
- pool := l.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
+ l.writeBuf = l.writeBuf[:0]
+ if hSize > 0 {
+ l.writeBuf = append(l.writeBuf, dataItem.h[:hSize])
+ }
+ if dSize > 0 {
+ var err error
+ l.writeBuf, err = reader.Peek(dSize, l.writeBuf)
+ if err != nil {
+ // This must never happen since the reader must have at least dSize
+ // bytes.
+ // Log an error to fail tests.
+ l.logger.Errorf("unexpected error while reading Data frame payload: %v", err)
+ return false, err
}
- buf = pool.Get(size)
- defer pool.Put(buf)
-
- copy((*buf)[:hSize], dataItem.h)
- _, _ = reader.Read((*buf)[hSize:])
}
// Now that outgoing flow controls are checked we can replenish str's write quota
@@ -1032,7 +1037,14 @@ func (l *loopyWriter) processData() (bool, error) {
if dataItem.onEachWrite != nil {
dataItem.onEachWrite()
}
- if err := l.framer.fr.WriteData(dataItem.streamID, endStream, (*buf)[:size]); err != nil {
+ err := l.framer.writeData(dataItem.streamID, endStream, l.writeBuf)
+ reader.Discard(dSize)
+ if cap(l.writeBuf) > maxWriteBufSize {
+ l.writeBuf = nil
+ } else {
+ clear(l.writeBuf)
+ }
+ if err != nil {
return false, err
}
str.bytesOutStanding += size
@@ -1040,7 +1052,7 @@ func (l *loopyWriter) processData() (bool, error) {
dataItem.h = dataItem.h[hSize:]
if remainingBytes == 0 { // All the data from that message was written out.
- _ = reader.Close()
+ reader.Close()
str.itl.dequeue()
}
if str.itl.isEmpty() {
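The maxWriteBufSize logic above is a cap-bounded scratch-slice reuse pattern: keep the backing array between writes for the common small case, but drop it after an unusually large write so that memory is not pinned. A generic sketch of the same pattern, outside of loopy's types:

```go
package scratch

// maxScratch bounds how large a cached scratch slice may grow before it is
// discarded instead of being kept for reuse.
const maxScratch = 64

type writer struct {
	scratch [][]byte // reused across calls to avoid per-write allocations
}

func (w *writer) write(chunks [][]byte) {
	buf := append(w.scratch[:0], chunks...) // may grow past maxScratch

	// ... hand buf to the underlying vectored write here ...

	if cap(buf) > maxScratch {
		w.scratch = nil // too large to keep; let the GC reclaim it
	} else {
		clear(buf) // drop references to chunk data, keep the capacity
		w.scratch = buf
	}
}
```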
diff --git a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
index dfc0f224e..7cfbc9637 100644
--- a/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
+++ b/vendor/google.golang.org/grpc/internal/transport/flowcontrol.go
@@ -28,7 +28,7 @@ import (
// writeQuota is a soft limit on the amount of data a stream can
// schedule before some of it is written out.
type writeQuota struct {
- quota int32
+ _ noCopy
// get waits on read from when quota goes less than or equal to zero.
// replenish writes on it when quota goes positive again.
ch chan struct{}
@@ -38,16 +38,17 @@ type writeQuota struct {
// It is implemented as a field so that it can be updated
// by tests.
replenish func(n int)
+ quota int32
}
-func newWriteQuota(sz int32, done <-chan struct{}) *writeQuota {
- w := &writeQuota{
- quota: sz,
- ch: make(chan struct{}, 1),
- done: done,
- }
+// init allows a writeQuota to be initialized in-place, which is useful for
+// resetting it or for avoiding a heap allocation when the writeQuota is
+// embedded in another struct.
+func (w *writeQuota) init(sz int32, done <-chan struct{}) {
+ w.quota = sz
+ w.ch = make(chan struct{}, 1)
+ w.done = done
w.replenish = w.realReplenish
- return w
}
func (w *writeQuota) get(sz int32) error {
@@ -67,9 +68,9 @@ func (w *writeQuota) get(sz int32) error {
func (w *writeQuota) realReplenish(n int) {
sz := int32(n)
- a := atomic.AddInt32(&w.quota, sz)
- b := a - sz
- if b <= 0 && a > 0 {
+ newQuota := atomic.AddInt32(&w.quota, sz)
+ previousQuota := newQuota - sz
+ if previousQuota <= 0 && newQuota > 0 {
select {
case w.ch <- struct{}{}:
default:
diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
index d954a64c3..7ab3422b8 100644
--- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go
@@ -50,7 +50,7 @@ import (
// NewServerHandlerTransport returns a ServerTransport handling gRPC from
// inside an http.Handler, or writes an HTTP error to w and returns an error.
// It requires that the http Server supports HTTP/2.
-func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats []stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
+func NewServerHandlerTransport(w http.ResponseWriter, r *http.Request, stats stats.Handler, bufferPool mem.BufferPool) (ServerTransport, error) {
if r.Method != http.MethodPost {
w.Header().Set("Allow", http.MethodPost)
msg := fmt.Sprintf("invalid gRPC request method %q", r.Method)
@@ -170,7 +170,7 @@ type serverHandlerTransport struct {
// TODO make sure this is consistent across handler_server and http2_server
contentSubtype string
- stats []stats.Handler
+ stats stats.Handler
logger *grpclog.PrefixLogger
bufferPool mem.BufferPool
@@ -274,15 +274,13 @@ func (ht *serverHandlerTransport) writeStatus(s *ServerStream, st *status.Status
}
})
- if err == nil { // transport has not been closed
+ if err == nil && ht.stats != nil { // transport has not been closed
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
s.hdrMu.Lock()
- for _, sh := range ht.stats {
- sh.HandleRPC(s.Context(), &stats.OutTrailer{
- Trailer: s.trailer.Copy(),
- })
- }
+ ht.stats.HandleRPC(s.Context(), &stats.OutTrailer{
+ Trailer: s.trailer.Copy(),
+ })
s.hdrMu.Unlock()
}
ht.Close(errors.New("finished writing status"))
@@ -374,19 +372,23 @@ func (ht *serverHandlerTransport) writeHeader(s *ServerStream, md metadata.MD) e
ht.rw.(http.Flusher).Flush()
})
- if err == nil {
- for _, sh := range ht.stats {
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- sh.HandleRPC(s.Context(), &stats.OutHeader{
- Header: md.Copy(),
- Compression: s.sendCompress,
- })
- }
+ if err == nil && ht.stats != nil {
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ ht.stats.HandleRPC(s.Context(), &stats.OutHeader{
+ Header: md.Copy(),
+ Compression: s.sendCompress,
+ })
}
return err
}
+func (ht *serverHandlerTransport) adjustWindow(*ServerStream, uint32) {
+}
+
+func (ht *serverHandlerTransport) updateWindow(*ServerStream, uint32) {
+}
+
func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream func(*ServerStream)) {
// With this transport type there will be exactly 1 stream: this HTTP request.
var cancel context.CancelFunc
@@ -411,11 +413,9 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
ctx = metadata.NewIncomingContext(ctx, ht.headerMD)
req := ht.req
s := &ServerStream{
- Stream: &Stream{
+ Stream: Stream{
id: 0, // irrelevant
ctx: ctx,
- requestRead: func(int) {},
- buf: newRecvBuffer(),
method: req.URL.Path,
recvCompress: req.Header.Get("grpc-encoding"),
contentSubtype: ht.contentSubtype,
@@ -424,9 +424,11 @@ func (ht *serverHandlerTransport) HandleStreams(ctx context.Context, startStream
st: ht,
headerWireLength: 0, // won't have access to header wire length until golang/go#18997.
}
- s.trReader = &transportReader{
- reader: &recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: s.buf},
- windowHandler: func(int) {},
+ s.Stream.buf.init()
+ s.readRequester = s
+ s.trReader = transportReader{
+ reader: recvBufferReader{ctx: s.ctx, ctxDone: s.ctx.Done(), recv: &s.buf},
+ windowHandler: s,
}
// readerDone is closed when the Body.Read-ing goroutine exits.
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
index 7cb238794..65b4ab243 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go
@@ -44,6 +44,7 @@ import (
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
"google.golang.org/grpc/internal/proxyattributes"
+ istats "google.golang.org/grpc/internal/stats"
istatus "google.golang.org/grpc/internal/status"
isyscall "google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/internal/transport/networktype"
@@ -105,7 +106,7 @@ type http2Client struct {
kp keepalive.ClientParameters
keepaliveEnabled bool
- statsHandlers []stats.Handler
+ statsHandler stats.Handler
initialWindowSize int32
@@ -335,14 +336,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
writerDone: make(chan struct{}),
goAway: make(chan struct{}),
keepaliveDone: make(chan struct{}),
- framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize),
+ framer: newFramer(conn, writeBufSize, readBufSize, opts.SharedWriteBuffer, maxHeaderListSize, opts.BufferPool),
fc: &trInFlow{limit: uint32(icwz)},
scheme: scheme,
activeStreams: make(map[uint32]*ClientStream),
isSecure: isSecure,
perRPCCreds: perRPCCreds,
kp: kp,
- statsHandlers: opts.StatsHandlers,
+ statsHandler: istats.NewCombinedHandler(opts.StatsHandlers...),
initialWindowSize: initialWindowSize,
nextID: 1,
maxConcurrentStreams: defaultMaxStreamsClient,
@@ -386,15 +387,14 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
updateFlowControl: t.updateFlowControl,
}
}
- for _, sh := range t.statsHandlers {
- t.ctx = sh.TagConn(t.ctx, &stats.ConnTagInfo{
+ if t.statsHandler != nil {
+ t.ctx = t.statsHandler.TagConn(t.ctx, &stats.ConnTagInfo{
RemoteAddr: t.remoteAddr,
LocalAddr: t.localAddr,
})
- connBegin := &stats.ConnBegin{
+ t.statsHandler.HandleConn(t.ctx, &stats.ConnBegin{
Client: true,
- }
- sh.HandleConn(t.ctx, connBegin)
+ })
}
if t.keepaliveEnabled {
t.kpDormancyCond = sync.NewCond(&t.mu)
@@ -481,10 +481,9 @@ func NewHTTP2Client(connectCtx, ctx context.Context, addr resolver.Address, opts
func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientStream {
// TODO(zhaoq): Handle uint32 overflow of Stream.id.
s := &ClientStream{
- Stream: &Stream{
+ Stream: Stream{
method: callHdr.Method,
sendCompress: callHdr.SendCompress,
- buf: newRecvBuffer(),
contentSubtype: callHdr.ContentSubtype,
},
ct: t,
@@ -492,26 +491,21 @@ func (t *http2Client) newStream(ctx context.Context, callHdr *CallHdr) *ClientSt
headerChan: make(chan struct{}),
doneFunc: callHdr.DoneFunc,
}
- s.wq = newWriteQuota(defaultWriteQuota, s.done)
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
+ s.Stream.buf.init()
+ s.Stream.wq.init(defaultWriteQuota, s.done)
+ s.readRequester = s
// The client side stream context should have exactly the same life cycle with the user provided context.
// That means, s.ctx should be read-only. And s.ctx is done iff ctx is done.
// So we use the original context here instead of creating a copy.
s.ctx = ctx
- s.trReader = &transportReader{
- reader: &recvBufferReader{
- ctx: s.ctx,
- ctxDone: s.ctx.Done(),
- recv: s.buf,
- closeStream: func(err error) {
- s.Close(err)
- },
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
+ s.trReader = transportReader{
+ reader: recvBufferReader{
+ ctx: s.ctx,
+ ctxDone: s.ctx.Done(),
+ recv: &s.buf,
+ clientStream: s,
},
+ windowHandler: s,
}
return s
}
@@ -823,7 +817,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
return nil
},
onOrphaned: cleanup,
- wq: s.wq,
+ wq: &s.wq,
}
firstTry := true
var ch chan struct{}
@@ -854,7 +848,7 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
transportDrainRequired = t.nextID > MaxStreamID
s.id = hdr.streamID
- s.fc = &inFlow{limit: uint32(t.initialWindowSize)}
+ s.fc = inFlow{limit: uint32(t.initialWindowSize)}
t.activeStreams[s.id] = s
t.mu.Unlock()
@@ -905,27 +899,23 @@ func (t *http2Client) NewStream(ctx context.Context, callHdr *CallHdr) (*ClientS
return nil, &NewStreamError{Err: ErrConnClosing, AllowTransparentRetry: true}
}
}
- if len(t.statsHandlers) != 0 {
+ if t.statsHandler != nil {
header, ok := metadata.FromOutgoingContext(ctx)
if ok {
header.Set("user-agent", t.userAgent)
} else {
header = metadata.Pairs("user-agent", t.userAgent)
}
- for _, sh := range t.statsHandlers {
- // Note: The header fields are compressed with hpack after this call returns.
- // No WireLength field is set here.
- // Note: Creating a new stats object to prevent pollution.
- outHeader := &stats.OutHeader{
- Client: true,
- FullMethod: callHdr.Method,
- RemoteAddr: t.remoteAddr,
- LocalAddr: t.localAddr,
- Compression: callHdr.SendCompress,
- Header: header,
- }
- sh.HandleRPC(s.ctx, outHeader)
- }
+ // Note: The header fields are compressed with hpack after this call returns.
+ // No WireLength field is set here.
+ t.statsHandler.HandleRPC(s.ctx, &stats.OutHeader{
+ Client: true,
+ FullMethod: callHdr.Method,
+ RemoteAddr: t.remoteAddr,
+ LocalAddr: t.localAddr,
+ Compression: callHdr.SendCompress,
+ Header: header,
+ })
}
if transportDrainRequired {
if t.logger.V(logLevel) {
@@ -1002,6 +992,9 @@ func (t *http2Client) closeStream(s *ClientStream, err error, rst bool, rstCode
// accessed anymore.
func (t *http2Client) Close(err error) {
t.conn.SetWriteDeadline(time.Now().Add(time.Second * 10))
+ // For background on the deadline value chosen here, see
+ // https://github.com/grpc/grpc-go/issues/8425#issuecomment-3057938248 .
+ t.conn.SetReadDeadline(time.Now().Add(time.Second))
t.mu.Lock()
// Make sure we only close once.
if t.state == closing {
@@ -1063,11 +1056,10 @@ func (t *http2Client) Close(err error) {
for _, s := range streams {
t.closeStream(s, err, false, http2.ErrCodeNo, st, nil, false)
}
- for _, sh := range t.statsHandlers {
- connEnd := &stats.ConnEnd{
+ if t.statsHandler != nil {
+ t.statsHandler.HandleConn(t.ctx, &stats.ConnEnd{
Client: true,
- }
- sh.HandleConn(t.ctx, connEnd)
+ })
}
}
@@ -1178,7 +1170,7 @@ func (t *http2Client) updateFlowControl(n uint32) {
})
}
-func (t *http2Client) handleData(f *http2.DataFrame) {
+func (t *http2Client) handleData(f *parsedDataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
@@ -1222,22 +1214,15 @@ func (t *http2Client) handleData(f *http2.DataFrame) {
t.closeStream(s, io.EOF, true, http2.ErrCodeFlowControl, status.New(codes.Internal, err.Error()), nil, false)
return
}
+ dataLen := f.data.Len()
if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+ if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of next frame.
- // Can this copy be eliminated?
- if len(f.Data()) > 0 {
- pool := t.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
+ if dataLen > 0 {
+ f.data.Ref()
+ s.write(recvMsg{buffer: f.data})
}
}
// The server has closed the stream without sending trailers. Record that
@@ -1477,17 +1462,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
contentTypeErr = "malformed header: missing HTTP content-type"
grpcMessage string
recvCompress string
- httpStatusCode *int
httpStatusErr string
- rawStatusCode = codes.Unknown
+ // the code from the grpc-status header, if present
+ grpcStatusCode = codes.Unknown
// headerError is set if an error is encountered while parsing the headers
headerError string
+ httpStatus string
)
- if initialHeader {
- httpStatusErr = "malformed header: missing HTTP status"
- }
-
for _, hf := range frame.Fields {
switch hf.Name {
case "content-type":
@@ -1507,69 +1489,71 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
- rawStatusCode = codes.Code(uint32(code))
+ grpcStatusCode = codes.Code(uint32(code))
case "grpc-message":
grpcMessage = decodeGrpcMessage(hf.Value)
case ":status":
- c, err := strconv.ParseInt(hf.Value, 10, 32)
+ httpStatus = hf.Value
+ default:
+ if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
+ break
+ }
+ v, err := decodeMetadataHeader(hf.Name, hf.Value)
if err != nil {
- se := status.New(codes.Internal, fmt.Sprintf("transport: malformed http-status: %v", err))
+ headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
+ logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
+ break
+ }
+ mdata[hf.Name] = append(mdata[hf.Name], v)
+ }
+ }
+
+ // If a non-gRPC response is received, then evaluate the HTTP status to
+ // process the response and close the stream.
+ // In case http status doesn't provide any error information (status : 200),
+ // then evalute response code to be Unknown.
+ if !isGRPC {
+ var grpcErrorCode = codes.Internal
+ if httpStatus == "" {
+ httpStatusErr = "malformed header: missing HTTP status"
+ } else {
+ // Parse the status codes (e.g. "200", 404").
+ statusCode, err := strconv.Atoi(httpStatus)
+ if err != nil {
+ se := status.New(grpcErrorCode, fmt.Sprintf("transport: malformed http-status: %v", err))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
- statusCode := int(c)
if statusCode >= 100 && statusCode < 200 {
if endStream {
se := status.New(codes.Internal, fmt.Sprintf(
"protocol error: informational header with status code %d must not have END_STREAM set", statusCode))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
}
+ // In case of informational headers, return.
return
}
- httpStatusCode = &statusCode
- if statusCode == 200 {
- httpStatusErr = ""
- break
- }
-
httpStatusErr = fmt.Sprintf(
"unexpected HTTP status code received from server: %d (%s)",
statusCode,
http.StatusText(statusCode),
)
- default:
- if isReservedHeader(hf.Name) && !isWhitelistedHeader(hf.Name) {
- break
- }
- v, err := decodeMetadataHeader(hf.Name, hf.Value)
- if err != nil {
- headerError = fmt.Sprintf("transport: malformed %s: %v", hf.Name, err)
- logger.Warningf("Failed to decode metadata header (%q, %q): %v", hf.Name, hf.Value, err)
- break
- }
- mdata[hf.Name] = append(mdata[hf.Name], v)
- }
- }
-
- if !isGRPC || httpStatusErr != "" {
- var code = codes.Internal // when header does not include HTTP status, return INTERNAL
-
- if httpStatusCode != nil {
var ok bool
- code, ok = HTTPStatusConvTab[*httpStatusCode]
+ grpcErrorCode, ok = HTTPStatusConvTab[statusCode]
if !ok {
- code = codes.Unknown
+ grpcErrorCode = codes.Unknown
}
}
var errs []string
if httpStatusErr != "" {
errs = append(errs, httpStatusErr)
}
+
if contentTypeErr != "" {
errs = append(errs, contentTypeErr)
}
- // Verify the HTTP response is a 200.
- se := status.New(code, strings.Join(errs, "; "))
+
+ se := status.New(grpcErrorCode, strings.Join(errs, "; "))
t.closeStream(s, se.Err(), true, http2.ErrCodeProtocol, se, nil, endStream)
return
}
@@ -1600,22 +1584,20 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}
}
- for _, sh := range t.statsHandlers {
+ if t.statsHandler != nil {
if !endStream {
- inHeader := &stats.InHeader{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InHeader{
Client: true,
WireLength: int(frame.Header().Length),
Header: metadata.MD(mdata).Copy(),
Compression: s.recvCompress,
- }
- sh.HandleRPC(s.ctx, inHeader)
+ })
} else {
- inTrailer := &stats.InTrailer{
+ t.statsHandler.HandleRPC(s.ctx, &stats.InTrailer{
Client: true,
WireLength: int(frame.Header().Length),
Trailer: metadata.MD(mdata).Copy(),
- }
- sh.HandleRPC(s.ctx, inTrailer)
+ })
}
}
@@ -1623,7 +1605,7 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
return
}
- status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
+ status := istatus.NewWithProto(grpcStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader])
// If client received END_STREAM from server while stream was still active,
// send RST_STREAM.
@@ -1670,7 +1652,7 @@ func (t *http2Client) reader(errCh chan<- error) {
// loop to keep reading incoming messages on this transport.
for {
t.controlBuf.throttle()
- frame, err := t.framer.fr.ReadFrame()
+ frame, err := t.framer.readFrame()
if t.keepaliveEnabled {
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
}
@@ -1685,7 +1667,7 @@ func (t *http2Client) reader(errCh chan<- error) {
if s != nil {
// use error detail to provide better err message
code := http2ErrConvTab[se.Code]
- errorDetail := t.framer.fr.ErrorDetail()
+ errorDetail := t.framer.errorDetail()
var msg string
if errorDetail != nil {
msg = errorDetail.Error()
@@ -1703,8 +1685,9 @@ func (t *http2Client) reader(errCh chan<- error) {
switch frame := frame.(type) {
case *http2.MetaHeadersFrame:
t.operateHeaders(frame)
- case *http2.DataFrame:
+ case *parsedDataFrame:
t.handleData(frame)
+ frame.data.Free()
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index 83cee314c..6f78a6b0c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -35,6 +35,8 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
+ "google.golang.org/protobuf/proto"
+
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/grpclog"
"google.golang.org/grpc/internal/grpcutil"
@@ -42,7 +44,6 @@ import (
istatus "google.golang.org/grpc/internal/status"
"google.golang.org/grpc/internal/syscall"
"google.golang.org/grpc/mem"
- "google.golang.org/protobuf/proto"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
@@ -86,7 +87,7 @@ type http2Server struct {
// updates, reset streams, and various settings) to the controller.
controlBuf *controlBuffer
fc *trInFlow
- stats []stats.Handler
+ stats stats.Handler
// Keepalive and max-age parameters for the server.
kp keepalive.ServerParameters
// Keepalive enforcement policy.
@@ -168,7 +169,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
if config.MaxHeaderListSize != nil {
maxHeaderListSize = *config.MaxHeaderListSize
}
- framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize)
+ framer := newFramer(conn, writeBufSize, readBufSize, config.SharedWriteBuffer, maxHeaderListSize, config.BufferPool)
// Send initial settings as connection preface to client.
isettings := []http2.Setting{{
ID: http2.SettingMaxFrameSize,
@@ -260,7 +261,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport,
fc: &trInFlow{limit: uint32(icwz)},
state: reachable,
activeStreams: make(map[uint32]*ServerStream),
- stats: config.StatsHandlers,
+ stats: config.StatsHandler,
kp: kp,
idle: time.Now(),
kep: kep,
@@ -390,16 +391,15 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
}
t.maxStreamID = streamID
- buf := newRecvBuffer()
s := &ServerStream{
- Stream: &Stream{
- id: streamID,
- buf: buf,
- fc: &inFlow{limit: uint32(t.initialWindowSize)},
+ Stream: Stream{
+ id: streamID,
+ fc: inFlow{limit: uint32(t.initialWindowSize)},
},
st: t,
headerWireLength: int(frame.Header().Length),
}
+ s.Stream.buf.init()
var (
// if false, content-type was missing or invalid
isGRPC = false
@@ -640,25 +640,21 @@ func (t *http2Server) operateHeaders(ctx context.Context, frame *http2.MetaHeade
t.channelz.SocketMetrics.StreamsStarted.Add(1)
t.channelz.SocketMetrics.LastRemoteStreamCreatedTimestamp.Store(time.Now().UnixNano())
}
- s.requestRead = func(n int) {
- t.adjustWindow(s, uint32(n))
- }
+ s.readRequester = s
s.ctxDone = s.ctx.Done()
- s.wq = newWriteQuota(defaultWriteQuota, s.ctxDone)
- s.trReader = &transportReader{
- reader: &recvBufferReader{
+ s.Stream.wq.init(defaultWriteQuota, s.ctxDone)
+ s.trReader = transportReader{
+ reader: recvBufferReader{
ctx: s.ctx,
ctxDone: s.ctxDone,
- recv: s.buf,
- },
- windowHandler: func(n int) {
- t.updateWindow(s, uint32(n))
+ recv: &s.buf,
},
+ windowHandler: s,
}
// Register the stream with loopy.
t.controlBuf.put(&registerStream{
streamID: s.id,
- wq: s.wq,
+ wq: &s.wq,
})
handle(s)
return nil
@@ -674,7 +670,7 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
}()
for {
t.controlBuf.throttle()
- frame, err := t.framer.fr.ReadFrame()
+ frame, err := t.framer.readFrame()
atomic.StoreInt64(&t.lastRead, time.Now().UnixNano())
if err != nil {
if se, ok := err.(http2.StreamError); ok {
@@ -711,8 +707,9 @@ func (t *http2Server) HandleStreams(ctx context.Context, handle func(*ServerStre
})
continue
}
- case *http2.DataFrame:
+ case *parsedDataFrame:
t.handleData(frame)
+ frame.data.Free()
case *http2.RSTStreamFrame:
t.handleRSTStream(frame)
case *http2.SettingsFrame:
@@ -792,7 +789,7 @@ func (t *http2Server) updateFlowControl(n uint32) {
}
-func (t *http2Server) handleData(f *http2.DataFrame) {
+func (t *http2Server) handleData(f *parsedDataFrame) {
size := f.Header().Length
var sendBDPPing bool
if t.bdpEst != nil {
@@ -837,22 +834,15 @@ func (t *http2Server) handleData(f *http2.DataFrame) {
t.closeStream(s, true, http2.ErrCodeFlowControl, false)
return
}
+ dataLen := f.data.Len()
if f.Header().Flags.Has(http2.FlagDataPadded) {
- if w := s.fc.onRead(size - uint32(len(f.Data()))); w > 0 {
+ if w := s.fc.onRead(size - uint32(dataLen)); w > 0 {
t.controlBuf.put(&outgoingWindowUpdate{s.id, w})
}
}
- // TODO(bradfitz, zhaoq): A copy is required here because there is no
- // guarantee f.Data() is consumed before the arrival of next frame.
- // Can this copy be eliminated?
- if len(f.Data()) > 0 {
- pool := t.bufferPool
- if pool == nil {
- // Note that this is only supposed to be nil in tests. Otherwise, stream is
- // always initialized with a BufferPool.
- pool = mem.DefaultBufferPool()
- }
- s.write(recvMsg{buffer: mem.Copy(f.Data(), pool)})
+ if dataLen > 0 {
+ f.data.Ref()
+ s.write(recvMsg{buffer: f.data})
}
}
if f.StreamEnded() {
@@ -1059,14 +1049,13 @@ func (t *http2Server) writeHeaderLocked(s *ServerStream) error {
t.closeStream(s, true, http2.ErrCodeInternal, false)
return ErrHeaderListSizeLimitViolation
}
- for _, sh := range t.stats {
+ if t.stats != nil {
// Note: Headers are compressed with hpack after this call returns.
// No WireLength field is set here.
- outHeader := &stats.OutHeader{
+ t.stats.HandleRPC(s.Context(), &stats.OutHeader{
Header: s.header.Copy(),
Compression: s.sendCompress,
- }
- sh.HandleRPC(s.Context(), outHeader)
+ })
}
return nil
}
@@ -1134,10 +1123,10 @@ func (t *http2Server) writeStatus(s *ServerStream, st *status.Status) error {
// Send a RST_STREAM after the trailers if the client has not already half-closed.
rst := s.getState() == streamActive
t.finishStream(s, rst, http2.ErrCodeNo, trailingHeader, true)
- for _, sh := range t.stats {
+ if t.stats != nil {
// Note: The trailer fields are compressed with hpack after this call returns.
// No WireLength field is set here.
- sh.HandleRPC(s.Context(), &stats.OutTrailer{
+ t.stats.HandleRPC(s.Context(), &stats.OutTrailer{
Trailer: s.trailer.Copy(),
})
}
@@ -1305,7 +1294,8 @@ func (t *http2Server) Close(err error) {
// deleteStream deletes the stream s from transport's active streams.
func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
t.mu.Lock()
- if _, ok := t.activeStreams[s.id]; ok {
+ _, isActive := t.activeStreams[s.id]
+ if isActive {
delete(t.activeStreams, s.id)
if len(t.activeStreams) == 0 {
t.idle = time.Now()
@@ -1313,7 +1303,7 @@ func (t *http2Server) deleteStream(s *ServerStream, eosReceived bool) {
}
t.mu.Unlock()
- if channelz.IsOn() {
+ if isActive && channelz.IsOn() {
if eosReceived {
t.channelz.SocketMetrics.StreamsSucceeded.Add(1)
} else {
diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go
index e3663f87f..6209eb23c 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http_util.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go
@@ -25,7 +25,6 @@ import (
"fmt"
"io"
"math"
- "net"
"net/http"
"net/url"
"strconv"
@@ -37,6 +36,7 @@ import (
"golang.org/x/net/http2"
"golang.org/x/net/http2/hpack"
"google.golang.org/grpc/codes"
+ "google.golang.org/grpc/mem"
)
const (
@@ -300,11 +300,11 @@ type bufWriter struct {
buf []byte
offset int
batchSize int
- conn net.Conn
+ conn io.Writer
err error
}
-func newBufWriter(conn net.Conn, batchSize int, pool *sync.Pool) *bufWriter {
+func newBufWriter(conn io.Writer, batchSize int, pool *sync.Pool) *bufWriter {
w := &bufWriter{
batchSize: batchSize,
conn: conn,
@@ -388,15 +388,35 @@ func toIOError(err error) error {
return ioError{error: err}
}
+type parsedDataFrame struct {
+ http2.FrameHeader
+ data mem.Buffer
+}
+
+func (df *parsedDataFrame) StreamEnded() bool {
+ return df.FrameHeader.Flags.Has(http2.FlagDataEndStream)
+}
+
type framer struct {
- writer *bufWriter
- fr *http2.Framer
+ writer *bufWriter
+ fr *http2.Framer
+ headerBuf []byte // cached slice for framer headers to reduce heap allocs.
+ reader io.Reader
+ dataFrame parsedDataFrame // Cached data frame to avoid heap allocations.
+ pool mem.BufferPool
+ errDetail error
}
var writeBufferPoolMap = make(map[int]*sync.Pool)
var writeBufferMutex sync.Mutex
-func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32) *framer {
+func newFramer(conn io.ReadWriter, writeBufferSize, readBufferSize int, sharedWriteBuffer bool, maxHeaderListSize uint32, memPool mem.BufferPool) *framer {
+ if memPool == nil {
+ // Note that this is only supposed to be nil in tests. Otherwise, stream
+ // is always initialized with a BufferPool.
+ memPool = mem.DefaultBufferPool()
+ }
+
if writeBufferSize < 0 {
writeBufferSize = 0
}
@@ -412,6 +432,8 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
f := &framer{
writer: w,
fr: http2.NewFramer(w, r),
+ reader: r,
+ pool: memPool,
}
f.fr.SetMaxReadFrameSize(http2MaxFrameLen)
// Opt-in to Frame reuse API on framer to reduce garbage.
@@ -422,6 +444,146 @@ func newFramer(conn net.Conn, writeBufferSize, readBufferSize int, sharedWriteBu
return f
}
+// writeData writes a DATA frame.
+//
+// It is the caller's responsibility not to violate the maximum frame size.
+func (f *framer) writeData(streamID uint32, endStream bool, data [][]byte) error {
+ var flags http2.Flags
+ if endStream {
+ flags = http2.FlagDataEndStream
+ }
+ length := uint32(0)
+ for _, d := range data {
+ length += uint32(len(d))
+ }
+ // TODO: Replace the header write with the framer API being added in
+ // https://github.com/golang/go/issues/66655.
+ f.headerBuf = append(f.headerBuf[:0],
+ byte(length>>16),
+ byte(length>>8),
+ byte(length),
+ byte(http2.FrameData),
+ byte(flags),
+ byte(streamID>>24),
+ byte(streamID>>16),
+ byte(streamID>>8),
+ byte(streamID))
+ if _, err := f.writer.Write(f.headerBuf); err != nil {
+ return err
+ }
+ for _, d := range data {
+ if _, err := f.writer.Write(d); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// readFrame reads a single frame. The returned Frame is only valid
+// until the next call to readFrame.
+func (f *framer) readFrame() (any, error) {
+ f.errDetail = nil
+ fh, err := f.fr.ReadFrameHeader()
+ if err != nil {
+ f.errDetail = f.fr.ErrorDetail()
+ return nil, err
+ }
+ // Read the data frame directly from the underlying io.Reader to avoid
+ // copies.
+ if fh.Type == http2.FrameData {
+ err = f.readDataFrame(fh)
+ return &f.dataFrame, err
+ }
+ fr, err := f.fr.ReadFrameForHeader(fh)
+ if err != nil {
+ f.errDetail = f.fr.ErrorDetail()
+ return nil, err
+ }
+ return fr, err
+}
+
+// errorDetail returns a more detailed error of the last error
+// returned by framer.readFrame. For instance, if readFrame
+// returns a StreamError with code PROTOCOL_ERROR, errorDetail
+// will say exactly what was invalid. errorDetail is not guaranteed
+// to return a non-nil value.
+// errorDetail is reset after the next call to readFrame.
+func (f *framer) errorDetail() error {
+ return f.errDetail
+}
+
+func (f *framer) readDataFrame(fh http2.FrameHeader) (err error) {
+ if fh.StreamID == 0 {
+ // DATA frames MUST be associated with a stream. If a
+ // DATA frame is received whose stream identifier
+ // field is 0x0, the recipient MUST respond with a
+ // connection error (Section 5.4.1) of type
+ // PROTOCOL_ERROR.
+ f.errDetail = errors.New("DATA frame with stream ID 0")
+ return http2.ConnectionError(http2.ErrCodeProtocol)
+ }
+ // Converting a *[]byte to a mem.SliceBuffer incurs a heap allocation. This
+ // conversion is performed by mem.NewBuffer. To avoid the extra allocation
+ // a []byte is allocated directly if required and cast to a mem.SliceBuffer.
+ var buf []byte
+ // poolHandle is the pointer returned by the buffer pool (if it's used.).
+ var poolHandle *[]byte
+ useBufferPool := !mem.IsBelowBufferPoolingThreshold(int(fh.Length))
+ if useBufferPool {
+ poolHandle = f.pool.Get(int(fh.Length))
+ buf = *poolHandle
+ defer func() {
+ if err != nil {
+ f.pool.Put(poolHandle)
+ }
+ }()
+ } else {
+ buf = make([]byte, int(fh.Length))
+ }
+ if fh.Flags.Has(http2.FlagDataPadded) {
+ if fh.Length == 0 {
+ return io.ErrUnexpectedEOF
+ }
+ // This initial 1-byte read can be inefficient for unbuffered readers,
+ // but it allows the rest of the payload to be read directly to the
+ // start of the destination slice. This makes it easy to return the
+ // original slice back to the buffer pool.
+ if _, err := io.ReadFull(f.reader, buf[:1]); err != nil {
+ return err
+ }
+ padSize := buf[0]
+ buf = buf[:len(buf)-1]
+ if int(padSize) > len(buf) {
+ // If the length of the padding is greater than the
+ // length of the frame payload, the recipient MUST
+ // treat this as a connection error.
+ // Filed: https://github.com/http2/http2-spec/issues/610
+ f.errDetail = errors.New("pad size larger than data payload")
+ return http2.ConnectionError(http2.ErrCodeProtocol)
+ }
+ if _, err := io.ReadFull(f.reader, buf); err != nil {
+ return err
+ }
+ buf = buf[:len(buf)-int(padSize)]
+ } else if _, err := io.ReadFull(f.reader, buf); err != nil {
+ return err
+ }
+
+ f.dataFrame.FrameHeader = fh
+ if useBufferPool {
+ // Update the handle to point to the (potentially re-sliced) buf.
+ *poolHandle = buf
+ f.dataFrame.data = mem.NewBuffer(poolHandle, f.pool)
+ } else {
+ f.dataFrame.data = mem.SliceBuffer(buf)
+ }
+ return nil
+}
+
+func (df *parsedDataFrame) Header() http2.FrameHeader {
+ return df.FrameHeader
+}
+
func getWriteBufferPool(size int) *sync.Pool {
writeBufferMutex.Lock()
defer writeBufferMutex.Unlock()
diff --git a/vendor/google.golang.org/grpc/internal/transport/server_stream.go b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
index cf8da0b52..ed6a13b75 100644
--- a/vendor/google.golang.org/grpc/internal/transport/server_stream.go
+++ b/vendor/google.golang.org/grpc/internal/transport/server_stream.go
@@ -32,7 +32,7 @@ import (
// ServerStream implements streaming functionality for a gRPC server.
type ServerStream struct {
- *Stream // Embed for common stream functionality.
+ Stream // Embed for common stream functionality.
st internalServerTransport
ctxDone <-chan struct{} // closed at the end of stream. Cache of ctx.Done() (for performance)
@@ -43,12 +43,13 @@ type ServerStream struct {
// Holds compressor names passed in grpc-accept-encoding metadata from the
// client.
clientAdvertisedCompressors string
- headerWireLength int
// hdrMu protects outgoing header and trailer metadata.
hdrMu sync.Mutex
header metadata.MD // the outgoing header metadata. Updated by WriteHeader.
headerSent atomic.Bool // atomically set when the headers are sent out.
+
+ headerWireLength int
}
// Read reads an n byte message from the input stream.
@@ -178,3 +179,11 @@ func (s *ServerStream) SetTrailer(md metadata.MD) error {
s.hdrMu.Unlock()
return nil
}
+
+func (s *ServerStream) requestRead(n int) {
+ s.st.adjustWindow(s, uint32(n))
+}
+
+func (s *ServerStream) updateWindow(n int) {
+ s.st.updateWindow(s, uint32(n))
+}
diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go
index 7dd53e80a..5ff83a7d7 100644
--- a/vendor/google.golang.org/grpc/internal/transport/transport.go
+++ b/vendor/google.golang.org/grpc/internal/transport/transport.go
@@ -68,11 +68,11 @@ type recvBuffer struct {
err error
}
-func newRecvBuffer() *recvBuffer {
- b := &recvBuffer{
- c: make(chan recvMsg, 1),
- }
- return b
+// init allows a recvBuffer to be initialized in-place, which is useful
+// for resetting a buffer or for avoiding a heap allocation when the buffer
+// is embedded in another struct.
+func (b *recvBuffer) init() {
+ b.c = make(chan recvMsg, 1)
}
func (b *recvBuffer) put(r recvMsg) {
@@ -123,12 +123,13 @@ func (b *recvBuffer) get() <-chan recvMsg {
// recvBufferReader implements io.Reader interface to read the data from
// recvBuffer.
type recvBufferReader struct {
- closeStream func(error) // Closes the client transport stream with the given error and nil trailer metadata.
- ctx context.Context
- ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
- recv *recvBuffer
- last mem.Buffer // Stores the remaining data in the previous calls.
- err error
+ _ noCopy
+ clientStream *ClientStream // The client transport stream is closed with a status representing ctx.Err() and nil trailer metadata.
+ ctx context.Context
+ ctxDone <-chan struct{} // cache of ctx.Done() (for performance).
+ recv *recvBuffer
+ last mem.Buffer // Stores the remaining data in the previous calls.
+ err error
}
func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
@@ -139,7 +140,7 @@ func (r *recvBufferReader) ReadMessageHeader(header []byte) (n int, err error) {
n, r.last = mem.ReadUnsafe(header, r.last)
return n, nil
}
- if r.closeStream != nil {
+ if r.clientStream != nil {
n, r.err = r.readMessageHeaderClient(header)
} else {
n, r.err = r.readMessageHeader(header)
@@ -164,7 +165,7 @@ func (r *recvBufferReader) Read(n int) (buf mem.Buffer, err error) {
}
return buf, nil
}
- if r.closeStream != nil {
+ if r.clientStream != nil {
buf, r.err = r.readClient(n)
} else {
buf, r.err = r.read(n)
@@ -209,7 +210,7 @@ func (r *recvBufferReader) readMessageHeaderClient(header []byte) (n int, err er
// TODO: delaying ctx error seems like a unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
- r.closeStream(ContextErr(r.ctx.Err()))
+ r.clientStream.Close(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readMessageHeaderAdditional(m, header)
case m := <-r.recv.get():
@@ -236,7 +237,7 @@ func (r *recvBufferReader) readClient(n int) (buf mem.Buffer, err error) {
// TODO: delaying ctx error seems like a unnecessary side effect. What
// we really want is to mark the stream as done, and return ctx error
// faster.
- r.closeStream(ContextErr(r.ctx.Err()))
+ r.clientStream.Close(ContextErr(r.ctx.Err()))
m := <-r.recv.get()
return r.readAdditional(m, n)
case m := <-r.recv.get():
@@ -285,27 +286,32 @@ const (
// Stream represents an RPC in the transport layer.
type Stream struct {
- id uint32
ctx context.Context // the associated context of the stream
method string // the associated RPC method of the stream
recvCompress string
sendCompress string
- buf *recvBuffer
- trReader *transportReader
- fc *inFlow
- wq *writeQuota
-
- // Callback to state application's intentions to read data. This
- // is used to adjust flow control, if needed.
- requestRead func(int)
- state streamState
+ readRequester readRequester
// contentSubtype is the content-subtype for requests.
// this must be lowercase or the behavior is undefined.
contentSubtype string
trailer metadata.MD // the key-value map of trailer metadata.
+
+ // Non-pointer fields are at the end to optimize GC performance.
+ state streamState
+ id uint32
+ buf recvBuffer
+ trReader transportReader
+ fc inFlow
+ wq writeQuota
+}
+
+// readRequester is used to state application's intentions to read data. This
+// is used to adjust flow control, if needed.
+type readRequester interface {
+ requestRead(int)
}
func (s *Stream) swapState(st streamState) streamState {
@@ -355,7 +361,7 @@ func (s *Stream) ReadMessageHeader(header []byte) (err error) {
if er := s.trReader.er; er != nil {
return er
}
- s.requestRead(len(header))
+ s.readRequester.requestRead(len(header))
for len(header) != 0 {
n, err := s.trReader.ReadMessageHeader(header)
header = header[n:]
@@ -378,7 +384,7 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
if er := s.trReader.er; er != nil {
return nil, er
}
- s.requestRead(n)
+ s.readRequester.requestRead(n)
for n != 0 {
buf, err := s.trReader.Read(n)
var bufLen int
@@ -401,16 +407,34 @@ func (s *Stream) read(n int) (data mem.BufferSlice, err error) {
return data, nil
}
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct {
+}
+
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
// transportReader reads all the data available for this Stream from the transport and
// passes them into the decoder, which converts them into a gRPC message stream.
// The error is io.EOF when the stream is done or another non-nil error if
// the stream broke.
type transportReader struct {
- reader *recvBufferReader
+ _ noCopy
// The handler to control the window update procedure for both this
// particular stream and the associated transport.
- windowHandler func(int)
+ windowHandler windowHandler
er error
+ reader recvBufferReader
+}
+
+// The handler to control the window update procedure for both this
+// particular stream and the associated transport.
+type windowHandler interface {
+ updateWindow(int)
}
func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
@@ -419,7 +443,7 @@ func (t *transportReader) ReadMessageHeader(header []byte) (int, error) {
t.er = err
return 0, err
}
- t.windowHandler(n)
+ t.windowHandler.updateWindow(n)
return n, nil
}
@@ -429,7 +453,7 @@ func (t *transportReader) Read(n int) (mem.Buffer, error) {
t.er = err
return buf, err
}
- t.windowHandler(buf.Len())
+ t.windowHandler.updateWindow(buf.Len())
return buf, nil
}
@@ -454,7 +478,7 @@ type ServerConfig struct {
ConnectionTimeout time.Duration
Credentials credentials.TransportCredentials
InTapHandle tap.ServerInHandle
- StatsHandlers []stats.Handler
+ StatsHandler stats.Handler
KeepaliveParams keepalive.ServerParameters
KeepalivePolicy keepalive.EnforcementPolicy
InitialWindowSize int32
@@ -615,6 +639,8 @@ type internalServerTransport interface {
write(s *ServerStream, hdr []byte, data mem.BufferSlice, opts *WriteOptions) error
writeStatus(s *ServerStream, st *status.Status) error
incrMsgRecv()
+ adjustWindow(s *ServerStream, n uint32)
+ updateWindow(s *ServerStream, n uint32)
}
// connectionErrorf creates an ConnectionError with the specified error description.
diff --git a/vendor/google.golang.org/grpc/mem/buffer_pool.go b/vendor/google.golang.org/grpc/mem/buffer_pool.go
index c37c58c02..f211e7274 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_pool.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_pool.go
@@ -32,6 +32,9 @@ type BufferPool interface {
Get(length int) *[]byte
// Put returns a buffer to the pool.
+ //
+ // The provided pointer must hold a prefix of the buffer obtained via
+ // BufferPool.Get to ensure the buffer's entire capacity can be re-used.
Put(*[]byte)
}
@@ -118,7 +121,11 @@ type sizedBufferPool struct {
}
func (p *sizedBufferPool) Get(size int) *[]byte {
- buf := p.pool.Get().(*[]byte)
+ buf, ok := p.pool.Get().(*[]byte)
+ if !ok {
+ buf := make([]byte, size, p.defaultSize)
+ return &buf
+ }
b := *buf
clear(b[:cap(b)])
*buf = b[:size]
@@ -137,12 +144,6 @@ func (p *sizedBufferPool) Put(buf *[]byte) {
func newSizedBufferPool(size int) *sizedBufferPool {
return &sizedBufferPool{
- pool: sync.Pool{
- New: func() any {
- buf := make([]byte, size)
- return &buf
- },
- },
defaultSize: size,
}
}
@@ -160,6 +161,7 @@ type simpleBufferPool struct {
func (p *simpleBufferPool) Get(size int) *[]byte {
bs, ok := p.pool.Get().(*[]byte)
if ok && cap(*bs) >= size {
+ clear((*bs)[:cap(*bs)])
*bs = (*bs)[:size]
return bs
}
diff --git a/vendor/google.golang.org/grpc/mem/buffer_slice.go b/vendor/google.golang.org/grpc/mem/buffer_slice.go
index af510d20c..084fb19c6 100644
--- a/vendor/google.golang.org/grpc/mem/buffer_slice.go
+++ b/vendor/google.golang.org/grpc/mem/buffer_slice.go
@@ -19,6 +19,7 @@
package mem
import (
+ "fmt"
"io"
)
@@ -117,43 +118,36 @@ func (s BufferSlice) MaterializeToBuffer(pool BufferPool) Buffer {
// Reader returns a new Reader for the input slice after taking references to
// each underlying buffer.
-func (s BufferSlice) Reader() Reader {
+func (s BufferSlice) Reader() *Reader {
s.Ref()
- return &sliceReader{
+ return &Reader{
data: s,
len: s.Len(),
}
}
// Reader exposes a BufferSlice's data as an io.Reader, allowing it to interface
-// with other parts systems. It also provides an additional convenience method
-// Remaining(), which returns the number of unread bytes remaining in the slice.
+// with other systems.
+//
// Buffers will be freed as they are read.
-type Reader interface {
- io.Reader
- io.ByteReader
- // Close frees the underlying BufferSlice and never returns an error. Subsequent
- // calls to Read will return (0, io.EOF).
- Close() error
- // Remaining returns the number of unread bytes remaining in the slice.
- Remaining() int
- // Reset frees the currently held buffer slice and starts reading from the
- // provided slice. This allows reusing the reader object.
- Reset(s BufferSlice)
-}
-
-type sliceReader struct {
+//
+// A Reader can be constructed from a BufferSlice; alternatively the zero value
+// of a Reader may be used after calling Reset on it.
+type Reader struct {
data BufferSlice
len int
// The index into data[0].ReadOnlyData().
bufferIdx int
}
-func (r *sliceReader) Remaining() int {
+// Remaining returns the number of unread bytes remaining in the slice.
+func (r *Reader) Remaining() int {
return r.len
}
-func (r *sliceReader) Reset(s BufferSlice) {
+// Reset frees the currently held buffer slice and starts reading from the
+// provided slice. This allows reusing the reader object.
+func (r *Reader) Reset(s BufferSlice) {
r.data.Free()
s.Ref()
r.data = s
@@ -161,14 +155,16 @@ func (r *sliceReader) Reset(s BufferSlice) {
r.bufferIdx = 0
}
-func (r *sliceReader) Close() error {
+// Close frees the underlying BufferSlice and never returns an error. Subsequent
+// calls to Read will return (0, io.EOF).
+func (r *Reader) Close() error {
r.data.Free()
r.data = nil
r.len = 0
return nil
}
-func (r *sliceReader) freeFirstBufferIfEmpty() bool {
+func (r *Reader) freeFirstBufferIfEmpty() bool {
if len(r.data) == 0 || r.bufferIdx != len(r.data[0].ReadOnlyData()) {
return false
}
@@ -179,7 +175,7 @@ func (r *sliceReader) freeFirstBufferIfEmpty() bool {
return true
}
-func (r *sliceReader) Read(buf []byte) (n int, _ error) {
+func (r *Reader) Read(buf []byte) (n int, _ error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -202,7 +198,8 @@ func (r *sliceReader) Read(buf []byte) (n int, _ error) {
return n, nil
}
-func (r *sliceReader) ReadByte() (byte, error) {
+// ReadByte reads a single byte.
+func (r *Reader) ReadByte() (byte, error) {
if r.len == 0 {
return 0, io.EOF
}
@@ -290,3 +287,59 @@ nextBuffer:
}
}
}
+
+// Discard skips the next n bytes, returning the number of bytes discarded.
+//
+// It frees buffers as they are fully consumed.
+//
+// If Discard skips fewer than n bytes, it also returns an error.
+func (r *Reader) Discard(n int) (discarded int, err error) {
+ total := n
+ for n > 0 && r.len > 0 {
+ curData := r.data[0].ReadOnlyData()
+ curSize := min(n, len(curData)-r.bufferIdx)
+ n -= curSize
+ r.len -= curSize
+ r.bufferIdx += curSize
+ if r.bufferIdx >= len(curData) {
+ r.data[0].Free()
+ r.data = r.data[1:]
+ r.bufferIdx = 0
+ }
+ }
+ discarded = total - n
+ if n > 0 {
+ return discarded, fmt.Errorf("insufficient bytes in reader")
+ }
+ return discarded, nil
+}
+
+// Peek returns the next n bytes without advancing the reader.
+//
+// Peek appends results to the provided res slice and returns the updated slice.
+// This pattern allows re-using the storage of res if it has sufficient
+// capacity.
+//
+// The returned subslices are views into the underlying buffers and are only
+// valid until the reader is advanced past the corresponding buffer.
+//
+// If Peek returns fewer than n bytes, it also returns an error.
+func (r *Reader) Peek(n int, res [][]byte) ([][]byte, error) {
+ for i := 0; n > 0 && i < len(r.data); i++ {
+ curData := r.data[i].ReadOnlyData()
+ start := 0
+ if i == 0 {
+ start = r.bufferIdx
+ }
+ curSize := min(n, len(curData)-start)
+ if curSize == 0 {
+ continue
+ }
+ res = append(res, curData[start:start+curSize])
+ n -= curSize
+ }
+ if n > 0 {
+ return nil, fmt.Errorf("insufficient bytes in reader")
+ }
+ return res, nil
+}
diff --git a/vendor/google.golang.org/grpc/preloader.go b/vendor/google.golang.org/grpc/preloader.go
index ee0ff969a..1e783febf 100644
--- a/vendor/google.golang.org/grpc/preloader.go
+++ b/vendor/google.golang.org/grpc/preloader.go
@@ -47,9 +47,6 @@ func (p *PreparedMsg) Encode(s Stream, msg any) error {
}
// check if the context has the relevant information to prepareMsg
- if rpcInfo.preloaderInfo == nil {
- return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo is nil")
- }
if rpcInfo.preloaderInfo.codec == nil {
return status.Errorf(codes.Internal, "grpc: rpcInfo.preloaderInfo.codec is nil")
}
diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go
index 47ea09f5c..6b04c9e87 100644
--- a/vendor/google.golang.org/grpc/rpc_util.go
+++ b/vendor/google.golang.org/grpc/rpc_util.go
@@ -657,8 +657,20 @@ type streamReader interface {
Read(n int) (mem.BufferSlice, error)
}
+// noCopy may be embedded into structs which must not be copied
+// after the first use.
+//
+// See https://golang.org/issues/8005#issuecomment-190753527
+// for details.
+type noCopy struct {
+}
+
+func (*noCopy) Lock() {}
+func (*noCopy) Unlock() {}
+
// parser reads complete gRPC messages from the underlying reader.
type parser struct {
+ _ noCopy
// r is the underlying reader.
// See the comment on recvMsg for the permissible
// error types.
@@ -949,7 +961,7 @@ func recv(p *parser, c baseCodec, s recvCompressor, dc Decompressor, m any, maxR
// Information about RPC
type rpcInfo struct {
failfast bool
- preloaderInfo *compressorInfo
+ preloaderInfo compressorInfo
}
// Information about Preloader
@@ -968,7 +980,7 @@ type rpcInfoContextKey struct{}
func newContextWithRPCInfo(ctx context.Context, failfast bool, codec baseCodec, cp Compressor, comp encoding.Compressor) context.Context {
return context.WithValue(ctx, rpcInfoContextKey{}, &rpcInfo{
failfast: failfast,
- preloaderInfo: &compressorInfo{
+ preloaderInfo: compressorInfo{
codec: codec,
cp: cp,
comp: comp,
diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go
index 1da2a542a..ddd377341 100644
--- a/vendor/google.golang.org/grpc/server.go
+++ b/vendor/google.golang.org/grpc/server.go
@@ -124,7 +124,8 @@ type serviceInfo struct {
// Server is a gRPC server to serve RPC requests.
type Server struct {
- opts serverOptions
+ opts serverOptions
+ statsHandler stats.Handler
mu sync.Mutex // guards following
lis map[net.Listener]bool
@@ -692,13 +693,14 @@ func NewServer(opt ...ServerOption) *Server {
o.apply(&opts)
}
s := &Server{
- lis: make(map[net.Listener]bool),
- opts: opts,
- conns: make(map[string]map[transport.ServerTransport]bool),
- services: make(map[string]*serviceInfo),
- quit: grpcsync.NewEvent(),
- done: grpcsync.NewEvent(),
- channelz: channelz.RegisterServer(""),
+ lis: make(map[net.Listener]bool),
+ opts: opts,
+ statsHandler: istats.NewCombinedHandler(opts.statsHandlers...),
+ conns: make(map[string]map[transport.ServerTransport]bool),
+ services: make(map[string]*serviceInfo),
+ quit: grpcsync.NewEvent(),
+ done: grpcsync.NewEvent(),
+ channelz: channelz.RegisterServer(""),
}
chainUnaryServerInterceptors(s)
chainStreamServerInterceptors(s)
@@ -999,7 +1001,7 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
ConnectionTimeout: s.opts.connectionTimeout,
Credentials: s.opts.creds,
InTapHandle: s.opts.inTapHandle,
- StatsHandlers: s.opts.statsHandlers,
+ StatsHandler: s.statsHandler,
KeepaliveParams: s.opts.keepaliveParams,
KeepalivePolicy: s.opts.keepalivePolicy,
InitialWindowSize: s.opts.initialWindowSize,
@@ -1036,18 +1038,18 @@ func (s *Server) newHTTP2Transport(c net.Conn) transport.ServerTransport {
func (s *Server) serveStreams(ctx context.Context, st transport.ServerTransport, rawConn net.Conn) {
ctx = transport.SetConnection(ctx, rawConn)
ctx = peer.NewContext(ctx, st.Peer())
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagConn(ctx, &stats.ConnTagInfo{
+ if s.statsHandler != nil {
+ ctx = s.statsHandler.TagConn(ctx, &stats.ConnTagInfo{
RemoteAddr: st.Peer().Addr,
LocalAddr: st.Peer().LocalAddr,
})
- sh.HandleConn(ctx, &stats.ConnBegin{})
+ s.statsHandler.HandleConn(ctx, &stats.ConnBegin{})
}
defer func() {
st.Close(errors.New("finished serving streams for the server transport"))
- for _, sh := range s.opts.statsHandlers {
- sh.HandleConn(ctx, &stats.ConnEnd{})
+ if s.statsHandler != nil {
+ s.statsHandler.HandleConn(ctx, &stats.ConnEnd{})
}
}()
@@ -1104,7 +1106,7 @@ var _ http.Handler = (*Server)(nil)
// Notice: This API is EXPERIMENTAL and may be changed or removed in a
// later release.
func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- st, err := transport.NewServerHandlerTransport(w, r, s.opts.statsHandlers, s.opts.bufferPool)
+ st, err := transport.NewServerHandlerTransport(w, r, s.statsHandler, s.opts.bufferPool)
if err != nil {
// Errors returned from transport.NewServerHandlerTransport have
// already been written to w.
@@ -1198,12 +1200,8 @@ func (s *Server) sendResponse(ctx context.Context, stream *transport.ServerStrea
return status.Errorf(codes.ResourceExhausted, "grpc: trying to send message larger than max (%d vs. %d)", payloadLen, s.opts.maxSendMessageSize)
}
err = stream.Write(hdr, payload, opts)
- if err == nil {
- if len(s.opts.statsHandlers) != 0 {
- for _, sh := range s.opts.statsHandlers {
- sh.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
- }
- }
+ if err == nil && s.statsHandler != nil {
+ s.statsHandler.HandleRPC(ctx, outPayload(false, msg, dataLen, payloadLen, time.Now()))
}
return err
}
@@ -1245,16 +1243,15 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info
}
func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerStream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) {
- shs := s.opts.statsHandlers
- if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+ sh := s.statsHandler
+ if sh != nil || trInfo != nil || channelz.IsOn() {
if channelz.IsOn() {
s.incrCallsStarted()
}
var statsBegin *stats.Begin
- for _, sh := range shs {
- beginTime := time.Now()
+ if sh != nil {
statsBegin = &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: time.Now(),
IsClientStream: false,
IsServerStream: false,
}
@@ -1282,7 +1279,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
trInfo.tr.Finish()
}
- for _, sh := range shs {
+ if sh != nil {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1379,7 +1376,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
}
var payInfo *payloadInfo
- if len(shs) != 0 || len(binlogs) != 0 {
+ if sh != nil || len(binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
@@ -1405,7 +1402,7 @@ func (s *Server) processUnaryRPC(ctx context.Context, stream *transport.ServerSt
return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err)
}
- for _, sh := range shs {
+ if sh != nil {
sh.HandleRPC(ctx, &stats.InPayload{
RecvTime: time.Now(),
Payload: v,
@@ -1579,33 +1576,30 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
if channelz.IsOn() {
s.incrCallsStarted()
}
- shs := s.opts.statsHandlers
+ sh := s.statsHandler
var statsBegin *stats.Begin
- if len(shs) != 0 {
- beginTime := time.Now()
+ if sh != nil {
statsBegin = &stats.Begin{
- BeginTime: beginTime,
+ BeginTime: time.Now(),
IsClientStream: sd.ClientStreams,
IsServerStream: sd.ServerStreams,
}
- for _, sh := range shs {
- sh.HandleRPC(ctx, statsBegin)
- }
+ sh.HandleRPC(ctx, statsBegin)
}
ctx = NewContextWithServerTransportStream(ctx, stream)
ss := &serverStream{
ctx: ctx,
s: stream,
- p: &parser{r: stream, bufferPool: s.opts.bufferPool},
+ p: parser{r: stream, bufferPool: s.opts.bufferPool},
codec: s.getCodec(stream.ContentSubtype()),
desc: sd,
maxReceiveMessageSize: s.opts.maxReceiveMessageSize,
maxSendMessageSize: s.opts.maxSendMessageSize,
trInfo: trInfo,
- statsHandler: shs,
+ statsHandler: sh,
}
- if len(shs) != 0 || trInfo != nil || channelz.IsOn() {
+ if sh != nil || trInfo != nil || channelz.IsOn() {
// See comment in processUnaryRPC on defers.
defer func() {
if trInfo != nil {
@@ -1619,7 +1613,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
ss.mu.Unlock()
}
- if len(shs) != 0 {
+ if sh != nil {
end := &stats.End{
BeginTime: statsBegin.BeginTime,
EndTime: time.Now(),
@@ -1627,9 +1621,7 @@ func (s *Server) processStreamingRPC(ctx context.Context, stream *transport.Serv
if err != nil && err != io.EOF {
end.Error = toRPCErr(err)
}
- for _, sh := range shs {
- sh.HandleRPC(ctx, end)
- }
+ sh.HandleRPC(ctx, end)
}
if channelz.IsOn() {
@@ -1818,19 +1810,17 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Ser
method := sm[pos+1:]
// FromIncomingContext is expensive: skip if there are no statsHandlers
- if len(s.opts.statsHandlers) > 0 {
+ if s.statsHandler != nil {
md, _ := metadata.FromIncomingContext(ctx)
- for _, sh := range s.opts.statsHandlers {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
- sh.HandleRPC(ctx, &stats.InHeader{
- FullMethod: stream.Method(),
- RemoteAddr: t.Peer().Addr,
- LocalAddr: t.Peer().LocalAddr,
- Compression: stream.RecvCompress(),
- WireLength: stream.HeaderWireLength(),
- Header: md,
- })
- }
+ ctx = s.statsHandler.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: stream.Method()})
+ s.statsHandler.HandleRPC(ctx, &stats.InHeader{
+ FullMethod: stream.Method(),
+ RemoteAddr: t.Peer().Addr,
+ LocalAddr: t.Peer().LocalAddr,
+ Compression: stream.RecvCompress(),
+ WireLength: stream.HeaderWireLength(),
+ Header: md,
+ })
}
// To have calls in stream callouts work. Will delete once all stats handler
// calls come from the gRPC layer.
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index 0a0af8961..ca87ff977 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -177,6 +177,8 @@ func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return cc.NewStream(ctx, desc, method, opts...)
}
+var emptyMethodConfig = serviceconfig.MethodConfig{}
+
func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, opts ...CallOption) (_ ClientStream, err error) {
// Start tracking the RPC for idleness purposes. This is where a stream is
// created for both streaming and unary RPCs, and hence is a good place to
@@ -217,7 +219,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return nil, err
}
- var mc serviceconfig.MethodConfig
+ mc := &emptyMethodConfig
var onCommit func()
newStream := func(ctx context.Context, done func()) (iresolver.ClientStream, error) {
return newClientStreamWithParams(ctx, desc, cc, method, mc, onCommit, done, nameResolutionDelayed, opts...)
@@ -240,7 +242,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
if rpcConfig.Context != nil {
ctx = rpcConfig.Context
}
- mc = rpcConfig.MethodConfig
+ mc = &rpcConfig.MethodConfig
onCommit = rpcConfig.OnCommitted
if rpcConfig.Interceptor != nil {
rpcInfo.Context = nil
@@ -258,7 +260,7 @@ func newClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, meth
return newStream(ctx, func() {})
}
-func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
+func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *ClientConn, method string, mc *serviceconfig.MethodConfig, onCommit, doneFunc func(), nameResolutionDelayed bool, opts ...CallOption) (_ iresolver.ClientStream, err error) {
callInfo := defaultCallInfo()
if mc.WaitForReady != nil {
callInfo.failFast = !*mc.WaitForReady
@@ -325,7 +327,7 @@ func newClientStreamWithParams(ctx context.Context, desc *StreamDesc, cc *Client
cs := &clientStream{
callHdr: callHdr,
ctx: ctx,
- methodConfig: &mc,
+ methodConfig: mc,
opts: opts,
callInfo: callInfo,
cc: cc,
@@ -418,19 +420,21 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
ctx := newContextWithRPCInfo(cs.ctx, cs.callInfo.failFast, cs.callInfo.codec, cs.compressorV0, cs.compressorV1)
method := cs.callHdr.Method
var beginTime time.Time
- shs := cs.cc.dopts.copts.StatsHandlers
- for _, sh := range shs {
- ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{FullMethodName: method, FailFast: cs.callInfo.failFast, NameResolutionDelay: cs.nameResolutionDelay})
+ sh := cs.cc.statsHandler
+ if sh != nil {
beginTime = time.Now()
- begin := &stats.Begin{
+ ctx = sh.TagRPC(ctx, &stats.RPCTagInfo{
+ FullMethodName: method, FailFast: cs.callInfo.failFast,
+ NameResolutionDelay: cs.nameResolutionDelay,
+ })
+ sh.HandleRPC(ctx, &stats.Begin{
Client: true,
BeginTime: beginTime,
FailFast: cs.callInfo.failFast,
IsClientStream: cs.desc.ClientStreams,
IsServerStream: cs.desc.ServerStreams,
IsTransparentRetryAttempt: isTransparent,
- }
- sh.HandleRPC(ctx, begin)
+ })
}
var trInfo *traceInfo
@@ -461,7 +465,7 @@ func (cs *clientStream) newAttemptLocked(isTransparent bool) (*csAttempt, error)
beginTime: beginTime,
cs: cs,
decompressorV0: cs.cc.dopts.dc,
- statsHandlers: shs,
+ statsHandler: sh,
trInfo: trInfo,
}, nil
}
@@ -482,10 +486,8 @@ func (a *csAttempt) getTransport() error {
if a.trInfo != nil {
a.trInfo.firstLine.SetRemoteAddr(a.transport.RemoteAddr())
}
- if pick.blocked {
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
- }
+ if pick.blocked && a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.DelayedPickComplete{})
}
return nil
}
@@ -529,7 +531,7 @@ func (a *csAttempt) newStream() error {
}
a.transportStream = s
a.ctx = s.Context()
- a.parser = &parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
+ a.parser = parser{r: s, bufferPool: a.cs.cc.dopts.copts.BufferPool}
return nil
}
@@ -601,7 +603,7 @@ type csAttempt struct {
cs *clientStream
transport transport.ClientTransport
transportStream *transport.ClientStream
- parser *parser
+ parser parser
pickResult balancer.PickResult
finished bool
@@ -615,8 +617,8 @@ type csAttempt struct {
// and cleared when the finish method is called.
trInfo *traceInfo
- statsHandlers []stats.Handler
- beginTime time.Time
+ statsHandler stats.Handler
+ beginTime time.Time
// set for newStream errors that may be transparently retried
allowTransparentRetry bool
@@ -1110,17 +1112,15 @@ func (a *csAttempt) sendMsg(m any, hdr []byte, payld mem.BufferSlice, dataLength
}
return io.EOF
}
- if len(a.statsHandlers) != 0 {
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
- }
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, outPayload(true, m, dataLength, payloadLength, time.Now()))
}
return nil
}
func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
cs := a.cs
- if len(a.statsHandlers) != 0 && payInfo == nil {
+ if a.statsHandler != nil && payInfo == nil {
payInfo = &payloadInfo{}
defer payInfo.free()
}
@@ -1141,7 +1141,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
// Only initialize this state once per stream.
a.decompressorSet = true
}
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, payInfo, a.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := a.transportStream.Status().Err(); statusErr != nil {
return statusErr
@@ -1163,8 +1163,8 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
a.mu.Unlock()
}
- for _, sh := range a.statsHandlers {
- sh.HandleRPC(a.ctx, &stats.InPayload{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.InPayload{
Client: true,
RecvTime: time.Now(),
Payload: m,
@@ -1179,7 +1179,7 @@ func (a *csAttempt) recvMsg(m any, payInfo *payloadInfo) (err error) {
}
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
+ if err := recv(&a.parser, cs.codec, a.transportStream, a.decompressorV0, m, *cs.callInfo.maxReceiveMessageSize, nil, a.decompressorV1, false); err == io.EOF {
return a.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
@@ -1217,15 +1217,14 @@ func (a *csAttempt) finish(err error) {
ServerLoad: balancerload.Parse(tr),
})
}
- for _, sh := range a.statsHandlers {
- end := &stats.End{
+ if a.statsHandler != nil {
+ a.statsHandler.HandleRPC(a.ctx, &stats.End{
Client: true,
BeginTime: a.beginTime,
EndTime: time.Now(),
Trailer: tr,
Error: err,
- }
- sh.HandleRPC(a.ctx, end)
+ })
}
if a.trInfo != nil && a.trInfo.tr != nil {
if err == nil {
@@ -1331,7 +1330,7 @@ func newNonRetryClientStream(ctx context.Context, desc *StreamDesc, method strin
return nil, err
}
as.transportStream = s
- as.parser = &parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
+ as.parser = parser{r: s, bufferPool: ac.dopts.copts.BufferPool}
ac.incrCallsStarted()
if desc != unaryStreamDesc {
// Listen on stream context to cleanup when the stream context is
@@ -1374,7 +1373,7 @@ type addrConnStream struct {
decompressorSet bool
decompressorV0 Decompressor
decompressorV1 encoding.Compressor
- parser *parser
+ parser parser
// mu guards finished and is held for the entire finish method.
mu sync.Mutex
@@ -1487,7 +1486,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Only initialize this state once per stream.
as.decompressorSet = true
}
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err != nil {
if err == io.EOF {
if statusErr := as.transportStream.Status().Err(); statusErr != nil {
return statusErr
@@ -1509,7 +1508,7 @@ func (as *addrConnStream) RecvMsg(m any) (err error) {
// Special handling for non-server-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
+ if err := recv(&as.parser, as.codec, as.transportStream, as.decompressorV0, m, *as.callInfo.maxReceiveMessageSize, nil, as.decompressorV1, false); err == io.EOF {
return as.transportStream.Status().Err() // non-server streaming Recv returns nil on success
} else if err != nil {
return toRPCErr(err)
@@ -1597,7 +1596,7 @@ type ServerStream interface {
type serverStream struct {
ctx context.Context
s *transport.ServerStream
- p *parser
+ p parser
codec baseCodec
desc *StreamDesc
@@ -1614,7 +1613,7 @@ type serverStream struct {
maxSendMessageSize int
trInfo *traceInfo
- statsHandler []stats.Handler
+ statsHandler stats.Handler
binlogs []binarylog.MethodLogger
// serverHeaderBinlogged indicates whether server header has been logged. It
@@ -1750,10 +1749,8 @@ func (ss *serverStream) SendMsg(m any) (err error) {
binlog.Log(ss.ctx, sm)
}
}
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), outPayload(false, m, dataLen, payloadLen, time.Now()))
}
return nil
}
@@ -1784,11 +1781,11 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
}()
var payInfo *payloadInfo
- if len(ss.statsHandler) != 0 || len(ss.binlogs) != 0 {
+ if ss.statsHandler != nil || len(ss.binlogs) != 0 {
payInfo = &payloadInfo{}
defer payInfo.free()
}
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
+ if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, payInfo, ss.decompressorV1, true); err != nil {
if err == io.EOF {
if len(ss.binlogs) != 0 {
chc := &binarylog.ClientHalfClose{}
@@ -1808,16 +1805,14 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
return toRPCErr(err)
}
ss.recvFirstMsg = true
- if len(ss.statsHandler) != 0 {
- for _, sh := range ss.statsHandler {
- sh.HandleRPC(ss.s.Context(), &stats.InPayload{
- RecvTime: time.Now(),
- Payload: m,
- Length: payInfo.uncompressedBytes.Len(),
- WireLength: payInfo.compressedLength + headerLen,
- CompressedLength: payInfo.compressedLength,
- })
- }
+ if ss.statsHandler != nil {
+ ss.statsHandler.HandleRPC(ss.s.Context(), &stats.InPayload{
+ RecvTime: time.Now(),
+ Payload: m,
+ Length: payInfo.uncompressedBytes.Len(),
+ WireLength: payInfo.compressedLength + headerLen,
+ CompressedLength: payInfo.compressedLength,
+ })
}
if len(ss.binlogs) != 0 {
cm := &binarylog.ClientMessage{
@@ -1834,7 +1829,7 @@ func (ss *serverStream) RecvMsg(m any) (err error) {
}
// Special handling for non-client-stream rpcs.
// This recv expects EOF or errors, so we don't collect inPayload.
- if err := recv(ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
+ if err := recv(&ss.p, ss.codec, ss.s, ss.decompressorV0, m, ss.maxReceiveMessageSize, nil, ss.decompressorV1, true); err == io.EOF {
return nil
} else if err != nil {
return err
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index 76f2e0d06..9e6d018fb 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.76.0"
+const Version = "1.77.0"
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 570f41083..60094766e 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -164,14 +164,14 @@ github.com/munnerz/goautoneg
# github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
## explicit
github.com/mwitkow/go-conntrack
-# github.com/netobserv/flowlogs-pipeline v1.10.0-community
+# github.com/netobserv/flowlogs-pipeline v1.10.0-community.0.20251205170812-75a990e42a64
## explicit; go 1.24.0
github.com/netobserv/flowlogs-pipeline/pkg/api
github.com/netobserv/flowlogs-pipeline/pkg/config
github.com/netobserv/flowlogs-pipeline/pkg/dsl
github.com/netobserv/flowlogs-pipeline/pkg/utils
github.com/netobserv/flowlogs-pipeline/pkg/utils/filters
-# github.com/netobserv/netobserv-ebpf-agent v1.10.0-community
+# github.com/netobserv/netobserv-ebpf-agent v1.10.0-community.0.20251125162210-4be10c36721e
## explicit; go 1.24.0
github.com/netobserv/netobserv-ebpf-agent/pkg/config
github.com/netobserv/netobserv-ebpf-agent/pkg/maps
@@ -286,8 +286,8 @@ github.com/stretchr/testify/mock
# github.com/x448/float16 v0.8.4
## explicit; go 1.11
github.com/x448/float16
-# go.opentelemetry.io/auto/sdk v1.1.0
-## explicit; go 1.22.0
+# go.opentelemetry.io/auto/sdk v1.2.1
+## explicit; go 1.24.0
go.opentelemetry.io/auto/sdk
go.opentelemetry.io/auto/sdk/internal/telemetry
# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0
@@ -296,7 +296,7 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconvutil
-# go.opentelemetry.io/otel v1.37.0
+# go.opentelemetry.io/otel v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
@@ -308,9 +308,10 @@ go.opentelemetry.io/otel/internal/global
go.opentelemetry.io/otel/propagation
go.opentelemetry.io/otel/semconv/v1.17.0
go.opentelemetry.io/otel/semconv/v1.20.0
-go.opentelemetry.io/otel/semconv/v1.26.0
go.opentelemetry.io/otel/semconv/v1.34.0
go.opentelemetry.io/otel/semconv/v1.34.0/httpconv
+go.opentelemetry.io/otel/semconv/v1.37.0
+go.opentelemetry.io/otel/semconv/v1.37.0/otelconv
# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace
@@ -322,12 +323,12 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.37.0
+# go.opentelemetry.io/otel/metric v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
-# go.opentelemetry.io/otel/sdk v1.37.0
+# go.opentelemetry.io/otel/sdk v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/sdk
go.opentelemetry.io/otel/sdk/instrumentation
@@ -335,7 +336,8 @@ go.opentelemetry.io/otel/sdk/internal/env
go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
-# go.opentelemetry.io/otel/trace v1.37.0
+go.opentelemetry.io/otel/sdk/trace/internal/x
+# go.opentelemetry.io/otel/trace v1.38.0
## explicit; go 1.23.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
@@ -370,10 +372,10 @@ go.yaml.in/yaml/v3
# golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
## explicit; go 1.23.0
golang.org/x/exp/slices
-# golang.org/x/mod v0.28.0
+# golang.org/x/mod v0.29.0
## explicit; go 1.24.0
golang.org/x/mod/semver
-# golang.org/x/net v0.46.0
+# golang.org/x/net v0.47.0
## explicit; go 1.24.0
golang.org/x/net/html
golang.org/x/net/html/atom
@@ -392,20 +394,20 @@ golang.org/x/net/websocket
golang.org/x/oauth2
golang.org/x/oauth2/clientcredentials
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.17.0
+# golang.org/x/sync v0.18.0
## explicit; go 1.24.0
golang.org/x/sync/errgroup
golang.org/x/sync/singleflight
-# golang.org/x/sys v0.37.0
+# golang.org/x/sys v0.38.0
## explicit; go 1.24.0
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.36.0
+# golang.org/x/term v0.37.0
## explicit; go 1.24.0
golang.org/x/term
-# golang.org/x/text v0.30.0
+# golang.org/x/text v0.31.0
## explicit; go 1.24.0
golang.org/x/text/encoding
golang.org/x/text/encoding/charmap
@@ -438,7 +440,7 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.12.0
## explicit; go 1.23.0
golang.org/x/time/rate
-# golang.org/x/tools v0.37.0
+# golang.org/x/tools v0.38.0
## explicit; go 1.24.0
golang.org/x/tools/cover
golang.org/x/tools/go/ast/edge
@@ -463,15 +465,15 @@ golang.org/x/tools/internal/versions
# gomodules.xyz/jsonpatch/v2 v2.5.0
## explicit; go 1.20
gomodules.xyz/jsonpatch/v2
-# google.golang.org/genproto/googleapis/api v0.0.0-20250804133106-a7a43d27e69b
-## explicit; go 1.23.0
+# google.golang.org/genproto/googleapis/api v0.0.0-20251022142026-3a174f9686a8
+## explicit; go 1.24.0
google.golang.org/genproto/googleapis/api/expr/v1alpha1
google.golang.org/genproto/googleapis/api/httpbody
-# google.golang.org/genproto/googleapis/rpc v0.0.0-20250804133106-a7a43d27e69b
-## explicit; go 1.23.0
+# google.golang.org/genproto/googleapis/rpc v0.0.0-20251022142026-3a174f9686a8
+## explicit; go 1.24.0
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.76.0
+# google.golang.org/grpc v1.77.0
## explicit; go 1.24.0
google.golang.org/grpc
google.golang.org/grpc/attributes
@@ -482,7 +484,6 @@ google.golang.org/grpc/balancer/endpointsharding
google.golang.org/grpc/balancer/grpclb/state
google.golang.org/grpc/balancer/pickfirst
google.golang.org/grpc/balancer/pickfirst/internal
-google.golang.org/grpc/balancer/pickfirst/pickfirstleaf
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
@@ -492,6 +493,7 @@ google.golang.org/grpc/credentials
google.golang.org/grpc/credentials/insecure
google.golang.org/grpc/encoding
google.golang.org/grpc/encoding/gzip
+google.golang.org/grpc/encoding/internal
google.golang.org/grpc/encoding/proto
google.golang.org/grpc/experimental/stats
google.golang.org/grpc/grpclog