diff --git a/.bingo/Variables.mk b/.bingo/Variables.mk
index e48f105be..e4b41f488 100644
--- a/.bingo/Variables.mk
+++ b/.bingo/Variables.mk
@@ -41,11 +41,11 @@ $(CRD_DIFF): $(BINGO_DIR)/crd-diff.mod
@echo "(re)installing $(GOBIN)/crd-diff-v0.5.0"
@cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=crd-diff.mod -o=$(GOBIN)/crd-diff-v0.5.0 "sigs.k8s.io/crdify"
-CRD_REF_DOCS := $(GOBIN)/crd-ref-docs-v0.2.0
+CRD_REF_DOCS := $(GOBIN)/crd-ref-docs-v0.3.0
$(CRD_REF_DOCS): $(BINGO_DIR)/crd-ref-docs.mod
@# Install binary/ries using Go 1.14+ build command. This is using bwplotka/bingo-controlled, separate go module with pinned dependencies.
- @echo "(re)installing $(GOBIN)/crd-ref-docs-v0.2.0"
- @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=crd-ref-docs.mod -o=$(GOBIN)/crd-ref-docs-v0.2.0 "github.com/elastic/crd-ref-docs"
+ @echo "(re)installing $(GOBIN)/crd-ref-docs-v0.3.0"
+ @cd $(BINGO_DIR) && GOWORK=off $(GO) build -mod=mod -modfile=crd-ref-docs.mod -o=$(GOBIN)/crd-ref-docs-v0.3.0 "github.com/elastic/crd-ref-docs"
GOJQ := $(GOBIN)/gojq-v0.12.17
$(GOJQ): $(BINGO_DIR)/gojq.mod
diff --git a/.bingo/crd-ref-docs.mod b/.bingo/crd-ref-docs.mod
index 1e73dd590..3c514cb8a 100644
--- a/.bingo/crd-ref-docs.mod
+++ b/.bingo/crd-ref-docs.mod
@@ -1,5 +1,5 @@
module _ // Auto generated by https://github.com/bwplotka/bingo. DO NOT EDIT
-go 1.24.0
+go 1.25.0
-require github.com/elastic/crd-ref-docs v0.2.0
+require github.com/elastic/crd-ref-docs v0.3.0
diff --git a/.bingo/crd-ref-docs.sum b/.bingo/crd-ref-docs.sum
index b16a8d02f..9d9c37c7b 100644
--- a/.bingo/crd-ref-docs.sum
+++ b/.bingo/crd-ref-docs.sum
@@ -1,9 +1,15 @@
+dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
+dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver/v3 v3.4.0 h1:Zog+i5UMtVoCU8oKka5P7i9q9HgrJeGzI9SA1Xbatp0=
+github.com/Masterminds/semver/v3 v3.4.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM=
github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig/v3 v3.3.0 h1:mQh0Yrg1XPo6vjYXgtf5OtijNAKJRNcTdOOGZe3tPhs=
+github.com/Masterminds/sprig/v3 v3.3.0/go.mod h1:Zy1iXRYNqNLUolqCpL4uhk6SHUMAOSCzdgBfDb35Lz0=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -12,14 +18,20 @@ github.com/elastic/crd-ref-docs v0.1.0 h1:Cr5kz89QB3Iuuj7dhAfLMApCrChEGAaIBTxGk/
github.com/elastic/crd-ref-docs v0.1.0/go.mod h1:X83mMBdJt05heJUYiS3T0yJ/JkCuliuhSUNav5Gjo/U=
github.com/elastic/crd-ref-docs v0.2.0 h1:U17MyGX71j4qfKTvYxbR4qZGoA1hc2thy7kseGYmP+o=
github.com/elastic/crd-ref-docs v0.2.0/go.mod h1:0bklkJhTG7nC6AVsdDi0wt5bGoqvzdZSzMMQkilZ6XM=
+github.com/elastic/crd-ref-docs v0.3.0 h1:9bGSUkBR56Z7TuDGQAu3KGbBkagwwZ6RkZmS+qvDuDM=
+github.com/elastic/crd-ref-docs v0.3.0/go.mod h1:8td3UC8CaO5M+G115O3FRKLmplmX+p0EqLMLGM6uNdk=
github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM=
github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
+github.com/fxamacker/cbor/v2 v2.9.0 h1:NpKPmjDBgUfBms6tr6JZkTHtfFGcMKsw3eGcmD/sapM=
+github.com/fxamacker/cbor/v2 v2.9.0/go.mod h1:vM4b+DJCtHn+zz7h3FFp/hDAI9WNWCsZj23V5ytsSxQ=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
+github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/gobuffalo/flect v1.0.2 h1:eqjPGSo2WmjgY2XlpGwo2NXgL3RucAKo4k4qQMNA5sA=
github.com/gobuffalo/flect v1.0.2/go.mod h1:A5msMlrHtLqh9umBSnvabjsMrCcCpAyzglnDvkbYKHs=
github.com/gobuffalo/flect v1.0.3 h1:xeWBM2nui+qnVvNM4S3foBhCAL2XgPU+a7FdpelbTq4=
@@ -28,6 +40,8 @@ github.com/goccy/go-yaml v1.11.3 h1:B3W9IdWbvrUu2OYQGwvU1nZtvMQJPBKgBUuweJjLj6I=
github.com/goccy/go-yaml v1.11.3/go.mod h1:wKnAMd44+9JAAnGQpWVEgBzGt3YuTaQ4uXoHvE4m7WU=
github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
+github.com/goccy/go-yaml v1.19.2 h1:PmFC1S6h8ljIz6gMRBopkjP1TVT7xuwrButHID66PoM=
+github.com/goccy/go-yaml v1.19.2/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -40,6 +54,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4=
github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
+github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@@ -62,16 +78,27 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee h1:W5t00kpgFdJifH4BDsTlE89Zl93FEloxaWZfGcifgq8=
+github.com/modern-go/reflect2 v1.0.3-0.20250322232337-35a7c28c31ee/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
+github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
+github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
+github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY=
+github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
+github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU=
+github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
+github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk=
+github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
@@ -87,6 +114,11 @@ go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
go.uber.org/zap v1.27.0 h1:aJMhYGrd5QSmlpLMr2MftRKl7t8J8PTZPA732ud/XR8=
go.uber.org/zap v1.27.0/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.uber.org/zap v1.27.1 h1:08RqriUEv8+ArZRYSTXy1LeBScaMpVSTBhCeaZYfMYc=
+go.uber.org/zap v1.27.1/go.mod h1:GB2qFLM7cTU87MWRP2mPIjqfIDnGu+VIO4V/SdhGo2E=
+go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
+go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
+go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@@ -94,12 +126,16 @@ golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
+golang.org/x/crypto v0.47.0 h1:V6e3FRj+n4dbpw86FJ8Fv7XVOql7TEwpHapKoMJ/GO8=
+golang.org/x/crypto v0.47.0/go.mod h1:ff3Y9VzzKbwSSEzWqJsJVBnWmRwRSHt/6Op5n9bQc4A=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg=
golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ=
+golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
+golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -108,11 +144,15 @@ golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.42.0 h1:jzkYrhi3YQWD6MLBJcsklgQsoAcw89EcZbJw8Z614hs=
golang.org/x/net v0.42.0/go.mod h1:FF1RA5d3u7nAYA4z2TkclSCKh68eSXtiFwcWQpPXdt8=
+golang.org/x/net v0.49.0 h1:eeHFmOGUTtaaPSGNmjBKpbng9MulQsJURQUAfUwY++o=
+golang.org/x/net v0.49.0/go.mod h1:/ysNB2EvaqvesRkuLAyjI1ycPZlQHM3q01F02UY/MV8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
+golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
+golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -126,6 +166,8 @@ golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
+golang.org/x/text v0.33.0 h1:B3njUFyqtHDUI5jMn1YIr5B0IE2U0qck04r6d4KPAxE=
+golang.org/x/text v0.33.0/go.mod h1:LuMebE6+rBincTi9+xWTY8TztLzKHc/9C1uBCG27+q8=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -134,6 +176,8 @@ golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw=
+golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
+golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -153,26 +197,38 @@ k8s.io/apiextensions-apiserver v0.29.0 h1:0VuspFG7Hj+SxyF/Z/2T0uFbI5gb5LRgEyUVE3
k8s.io/apiextensions-apiserver v0.29.0/go.mod h1:TKmpy3bTS0mr9pylH0nOt/QzQRrW7/h7yLdRForMZwc=
k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs=
k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc=
+k8s.io/apiextensions-apiserver v0.35.0 h1:3xHk2rTOdWXXJM+RDQZJvdx0yEOgC0FgQ1PlJatA5T4=
+k8s.io/apiextensions-apiserver v0.35.0/go.mod h1:E1Ahk9SADaLQ4qtzYFkwUqusXTcaV2uw3l14aqpL2LU=
k8s.io/apimachinery v0.29.3 h1:2tbx+5L7RNvqJjn7RIuIKu9XTsIZ9Z5wX2G22XAa5EU=
k8s.io/apimachinery v0.29.3/go.mod h1:hx/S4V2PNW4OMg3WizRrHutyB5la0iCUbZym+W0EQIU=
k8s.io/apimachinery v0.33.3 h1:4ZSrmNa0c/ZpZJhAgRdcsFcZOw1PQU1bALVQ0B3I5LA=
k8s.io/apimachinery v0.33.3/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM=
+k8s.io/apimachinery v0.35.0 h1:Z2L3IHvPVv/MJ7xRxHEtk6GoJElaAqDCCU0S6ncYok8=
+k8s.io/apimachinery v0.35.0/go.mod h1:jQCgFZFR1F4Ik7hvr2g84RTJSZegBc8yHgFWKn//hns=
k8s.io/klog/v2 v2.110.1 h1:U/Af64HJf7FcwMcXyKm2RPM22WZzyR7OSpYj5tg3cL0=
k8s.io/klog/v2 v2.110.1/go.mod h1:YGtd1984u+GgbuZ7e08/yBuAfKLSO0+uR1Fhi6ExXjo=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE=
+k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4 h1:HhDfevmPS+OalTjQRKbTHppRIz01AWi8s45TMXStgYY=
+k8s.io/kube-openapi v0.0.0-20260127142750-a19766b6e2d4/go.mod h1:kdmbQkyfwUagLfXIad1y2TdrjPFWp2Q89B3qkRwf/pQ=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI=
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
+k8s.io/utils v0.0.0-20260108192941-914a6e750570 h1:JT4W8lsdrGENg9W+YwwdLJxklIuKWdRm+BC+xt33FOY=
+k8s.io/utils v0.0.0-20260108192941-914a6e750570/go.mod h1:xDxuJ0whA3d0I4mf/C4ppKHxXynQ+fxnkmQH0vTHnuk=
sigs.k8s.io/controller-tools v0.14.0 h1:rnNoCC5wSXlrNoBKKzL70LNJKIQKEzT6lloG6/LF73A=
sigs.k8s.io/controller-tools v0.14.0/go.mod h1:TV7uOtNNnnR72SpzhStvPkoS/U5ir0nMudrkrC4M9Sc=
sigs.k8s.io/controller-tools v0.18.0 h1:rGxGZCZTV2wJreeRgqVoWab/mfcumTMmSwKzoM9xrsE=
sigs.k8s.io/controller-tools v0.18.0/go.mod h1:gLKoiGBriyNh+x1rWtUQnakUYEujErjXs9pf+x/8n1U=
+sigs.k8s.io/controller-tools v0.20.0 h1:VWZF71pwSQ2lZZCt7hFGJsOfDc5dVG28/IysjjMWXL8=
+sigs.k8s.io/controller-tools v0.20.0/go.mod h1:b4qPmjGU3iZwqn34alUU5tILhNa9+VXK+J3QV0fT/uU=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730 h1:IpInykpT6ceI+QxKBbEflcR5EXP7sU1kvOlxwZh5txg=
+sigs.k8s.io/json v0.0.0-20250730193827-2d320260d730/go.mod h1:mdzfpAEoE6DHQEN0uh9ZbOCuHbLK5wOm7dK4ctXE9Tg=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
@@ -180,5 +236,7 @@ sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+s
sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0 h1:jTijUJbW353oVOd9oTlifJqOGEkUw2jB/fXCbTiQEco=
+sigs.k8s.io/structured-merge-diff/v6 v6.3.0/go.mod h1:M3W8sfWvn2HhQDIbGWj3S099YozAsymCo/wrT5ohRUE=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY=
diff --git a/.bingo/variables.env b/.bingo/variables.env
index 9a7fb41fa..60a9a45a9 100644
--- a/.bingo/variables.env
+++ b/.bingo/variables.env
@@ -16,7 +16,7 @@ CONTROLLER_GEN="${GOBIN}/controller-gen-v0.20.0"
CRD_DIFF="${GOBIN}/crd-diff-v0.5.0"
-CRD_REF_DOCS="${GOBIN}/crd-ref-docs-v0.2.0"
+CRD_REF_DOCS="${GOBIN}/crd-ref-docs-v0.3.0"
GOJQ="${GOBIN}/gojq-v0.12.17"
diff --git a/commitchecker.yaml b/commitchecker.yaml
index 49d499367..90c377711 100644
--- a/commitchecker.yaml
+++ b/commitchecker.yaml
@@ -1,4 +1,4 @@
-expectedMergeBase: e617310cb535a41a5161d083ad293e7f5bd40077
+expectedMergeBase: e41eb44fd2f166f40bb35d81918c96c576f55f3b
upstreamBranch: main
upstreamOrg: operator-framework
upstreamRepo: operator-controller
diff --git a/docs/api-reference/olmv1-api-reference.md b/docs/api-reference/olmv1-api-reference.md
index 74a1f635a..3ee7f1938 100644
--- a/docs/api-reference/olmv1-api-reference.md
+++ b/docs/api-reference/olmv1-api-reference.md
@@ -97,10 +97,10 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `packageName` _string_ | packageName specifies the name of the package to be installed and is used to filter
the content from catalogs.
It is required, immutable, and follows the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
Some examples of valid values are:
- some-package
- 123-package
- 1-package-2
- somepackage
Some examples of invalid values are:
- -some-package
- some-package-
- thisisareallylongpackagenamethatisgreaterthanthemaximumlength
- some.package
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 253
Required: \{\}
|
-| `version` _string_ | version is an optional semver constraint (a specific version or range of versions).
When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
known as comparison strings.
You can add additional comparison strings using the OR operator (\|\|).
# Range Comparisons
To specify a version range, you can use a comparison string like ">=3.0,
<3.6". When specifying a range, automatic updates will occur within that
range. The example comparison string means "install any version greater than
or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any
upgrades are available within the version range after initial installation,
those upgrades should be automatically performed.
# Pinned Versions
To specify an exact version to install you can use a version range that
"pins" to a specific version. When pinning to a specific version, no
automatic updates will occur. An example of a pinned version range is
"0.6.0", which means "only install version 0.6.0 and never
upgrade from this version".
# Basic Comparison Operators
The basic comparison operators and their meanings are:
- "=", equal (not aliased to an operator)
- "!=", not equal
- "<", less than
- ">", greater than
- ">=", greater than OR equal to
- "<=", less than OR equal to
# Wildcard Comparisons
You can use the "x", "X", and "*" characters as wildcard characters in all
comparison operations. Some examples of using the wildcard characters:
- "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0"
- ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0"
- "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3"
- "x", "X", and "*" is equivalent to ">= 0.0.0"
# Patch Release Comparisons
When you want to specify a minor version up to the next major version you
can use the "~" character to perform patch comparisons. Some examples:
- "~1.2.3" is equivalent to ">=1.2.3, <1.3.0"
- "~1" and "~1.x" is equivalent to ">=1, <2"
- "~2.3" is equivalent to ">=2.3, <2.4"
- "~1.2.x" is equivalent to ">=1.2.0, <1.3.0"
# Major Release Comparisons
You can use the "^" character to make major release comparisons after a
stable 1.0.0 version is published. If there is no stable version published, // minor versions define the stability level. Some examples:
- "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"
- "^1.2.x" is equivalent to ">=1.2.0, <2.0.0"
- "^2.3" is equivalent to ">=2.3, <3"
- "^2.x" is equivalent to ">=2.0.0, <3"
- "^0.2.3" is equivalent to ">=0.2.3, <0.3.0"
- "^0.2" is equivalent to ">=0.2.0, <0.3.0"
- "^0.0.3" is equvalent to ">=0.0.3, <0.0.4"
- "^0.0" is equivalent to ">=0.0.0, <0.1.0"
- "^0" is equivalent to ">=0.0.0, <1.0.0"
# OR Comparisons
You can use the "\|\|" character to represent an OR operation in the version
range. Some examples:
- ">=1.2.3, <2.0.0 \|\| >3.0.0"
- "^0 \|\| ^3 \|\| ^5"
For more information on semver, please see https://semver.org/ | | MaxLength: 64
|
-| `channels` _string array_ | channels is optional and specifies a set of channels belonging to the package
specified in the packageName field.
A channel is a package-author-defined stream of updates for an extension.
Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
You can specify no more than 256 channels.
When specified, it constrains the set of installable bundles and the automated upgrade path.
This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
- Automatic upgrades are constrained to upgrade edges defined by the selected channel
When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
- alpha
- stable
- stable-v1
- v1-stable
- dev-preview
- preview
- community
Some examples of invalid values are:
- -some-channel
- some-channel-
- thisisareallylongchannelnamethatisgreaterthanthemaximumlength
- original_40
- --default-channel
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256
items:MaxLength: 253
items:XValidation: \{self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") channels entries must be valid DNS1123 subdomains \}
|
-| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
When unspecified, all ClusterCatalogs are used in the bundle selection process. | | |
-| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
are enforced for the package referenced in the packageName field.
Allowed values are "CatalogProvided", "SelfCertified", or omitted.
When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
author are met.
When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
This allows upgrades and downgrades to any version of the package.
This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
such as data loss.
Use this option only if you have independently verified the changes.
When omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified]
|
+| `version` _string_ | version is an optional semver constraint (a specific version or range of versions).
When unspecified, the latest version available is installed.
Acceptable version ranges are no longer than 64 characters.
Version ranges are composed of comma- or space-delimited values and one or more comparison operators,
known as comparison strings.
You can add additional comparison strings using the OR operator (\|\|).
# Range Comparisons
To specify a version range, you can use a comparison string like ">=3.0,
<3.6". When specifying a range, automatic updates will occur within that
range. The example comparison string means "install any version greater than
or equal to 3.0.0 but less than 3.6.0.". It also states intent that if any
upgrades are available within the version range after initial installation,
those upgrades should be automatically performed.
# Pinned Versions
To specify an exact version to install you can use a version range that
"pins" to a specific version. When pinning to a specific version, no
automatic updates will occur. An example of a pinned version range is
"0.6.0", which means "only install version 0.6.0 and never
upgrade from this version".
# Basic Comparison Operators
The basic comparison operators and their meanings are:
- "=", equal (not aliased to an operator)
- "!=", not equal
- "<", less than
- ">", greater than
- ">=", greater than OR equal to
- "<=", less than OR equal to
# Wildcard Comparisons
You can use the "x", "X", and "*" characters as wildcard characters in all
comparison operations. Some examples of using the wildcard characters:
- "1.2.x", "1.2.X", and "1.2.*" is equivalent to ">=1.2.0, < 1.3.0"
- ">= 1.2.x", ">= 1.2.X", and ">= 1.2.*" is equivalent to ">= 1.2.0"
- "<= 2.x", "<= 2.X", and "<= 2.*" is equivalent to "< 3"
- "x", "X", and "*" is equivalent to ">= 0.0.0"
# Patch Release Comparisons
When you want to specify a minor version up to the next major version you
can use the "~" character to perform patch comparisons. Some examples:
- "~1.2.3" is equivalent to ">=1.2.3, <1.3.0"
- "~1" and "~1.x" is equivalent to ">=1, <2"
- "~2.3" is equivalent to ">=2.3, <2.4"
- "~1.2.x" is equivalent to ">=1.2.0, <1.3.0"
# Major Release Comparisons
You can use the "^" character to make major release comparisons after a
stable 1.0.0 version is published. If there is no stable version published, minor versions define the stability level. Some examples:
- "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"
- "^1.2.x" is equivalent to ">=1.2.0, <2.0.0"
- "^2.3" is equivalent to ">=2.3, <3"
- "^2.x" is equivalent to ">=2.0.0, <3"
- "^0.2.3" is equivalent to ">=0.2.3, <0.3.0"
- "^0.2" is equivalent to ">=0.2.0, <0.3.0"
- "^0.0.3" is equvalent to ">=0.0.3, <0.0.4"
- "^0.0" is equivalent to ">=0.0.0, <0.1.0"
- "^0" is equivalent to ">=0.0.0, <1.0.0"
# OR Comparisons
You can use the "\|\|" character to represent an OR operation in the version
range. Some examples:
- ">=1.2.3, <2.0.0 \|\| >3.0.0"
- "^0 \|\| ^3 \|\| ^5"
For more information on semver, please see https://semver.org/ | | MaxLength: 64
Optional: \{\}
|
+| `channels` _string array_ | channels is optional and specifies a set of channels belonging to the package
specified in the packageName field.
A channel is a package-author-defined stream of updates for an extension.
Each channel in the list must follow the DNS subdomain standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters, hyphens (-) or periods (.),
start and end with an alphanumeric character, and be no longer than 253 characters.
You can specify no more than 256 channels.
When specified, it constrains the set of installable bundles and the automated upgrade path.
This constraint is an AND operation with the version field. For example:
- Given channel is set to "foo"
- Given version is set to ">=1.0.0, <1.5.0"
- Only bundles that exist in channel "foo" AND satisfy the version range comparison are considered installable
- Automatic upgrades are constrained to upgrade edges defined by the selected channel
When unspecified, upgrade edges across all channels are used to identify valid automatic upgrade paths.
Some examples of valid values are:
- 1.1.x
- alpha
- stable
- stable-v1
- v1-stable
- dev-preview
- preview
- community
Some examples of invalid values are:
- -some-channel
- some-channel-
- thisisareallylongchannelnamethatisgreaterthanthemaximumlength
- original_40
- --default-channel
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxItems: 256
items:MaxLength: 253
items:XValidation: \{self.matches("^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$") channels entries must be valid DNS1123 subdomains \}
Optional: \{\}
|
+| `selector` _[LabelSelector](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#labelselector-v1-meta)_ | selector is optional and filters the set of ClusterCatalogs used in the bundle selection process.
When unspecified, all ClusterCatalogs are used in the bundle selection process. | | Optional: \{\}
|
+| `upgradeConstraintPolicy` _[UpgradeConstraintPolicy](#upgradeconstraintpolicy)_ | upgradeConstraintPolicy is optional and controls whether the upgrade paths defined in the catalog
are enforced for the package referenced in the packageName field.
Allowed values are "CatalogProvided", "SelfCertified", or omitted.
When set to "CatalogProvided", automatic upgrades only occur when upgrade constraints specified by the package
author are met.
When set to "SelfCertified", the upgrade constraints specified by the package author are ignored.
This allows upgrades and downgrades to any version of the package.
This is considered a dangerous operation as it can lead to unknown and potentially disastrous outcomes,
such as data loss.
Use this option only if you have independently verified the changes.
When omitted, the default value is "CatalogProvided". | CatalogProvided | Enum: [CatalogProvided SelfCertified]
Optional: \{\}
|
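
For illustration only (not part of this patch), here is a hedged sketch of how the packageName, version, and channels constraints described above combine. The placement under `spec.source.catalog` follows the SourceConfig example later in this reference; the package and channel names are hypothetical.

```yaml
# Fragment of a ClusterExtension spec.source stanza (assumed placement).
sourceType: Catalog
catalog:
  packageName: some-package
  # "^1.2.3" is equivalent to ">=1.2.3, <2.0.0"; automatic upgrades stay within
  # this range AND within channel "stable", since version and channels are ANDed.
  version: "^1.2.3"
  channels:
    - stable
  upgradeConstraintPolicy: CatalogProvided   # default: catalog upgrade edges are enforced
```
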
#### CatalogSource
@@ -118,7 +118,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `type` _[SourceType](#sourcetype)_ | type is a required field that specifies the type of source for the catalog.
The only allowed value is "Image".
When set to "Image", the ClusterCatalog content is sourced from an OCI image.
When using an image source, the image field must be set and must be the only field defined for this type. | | Enum: [Image]
Required: \{\}
|
-| `image` _[ImageSource](#imagesource)_ | image configures how catalog contents are sourced from an OCI image.
It is required when type is Image, and forbidden otherwise. | | |
+| `image` _[ImageSource](#imagesource)_ | image configures how catalog contents are sourced from an OCI image.
It is required when type is Image, and forbidden otherwise. | | Optional: \{\}
|
#### ClusterCatalog
@@ -137,11 +137,11 @@ _Appears in:_
| --- | --- | --- | --- |
| `apiVersion` _string_ | `olm.operatorframework.io/v1` | | |
| `kind` _string_ | `ClusterCatalog` | | |
-| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
-| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | Optional: \{\}
|
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | Optional: \{\}
|
| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `spec` _[ClusterCatalogSpec](#clustercatalogspec)_ | spec is a required field that defines the desired state of the ClusterCatalog.
The controller ensures that the catalog is unpacked and served over the catalog content HTTP server. | | Required: \{\}
|
-| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains the following information about the state of the ClusterCatalog:
- Whether the catalog contents are being served via the catalog content HTTP server
- Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved | | |
+| `status` _[ClusterCatalogStatus](#clustercatalogstatus)_ | status contains the following information about the state of the ClusterCatalog:
- Whether the catalog contents are being served via the catalog content HTTP server
- Whether the ClusterCatalog is progressing to a new state
- A reference to the source from which the catalog contents were retrieved | | Optional: \{\}
|
#### ClusterCatalogList
@@ -158,8 +158,8 @@ ClusterCatalogList contains a list of ClusterCatalog
| --- | --- | --- | --- |
| `apiVersion` _string_ | `olm.operatorframework.io/v1` | | |
| `kind` _string_ | `ClusterCatalogList` | | |
-| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
-| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | Optional: \{\}
|
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | Optional: \{\}
|
| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
| `items` _[ClusterCatalog](#clustercatalog) array_ | items is a list of ClusterCatalogs.
items is required. | | Required: \{\}
|
@@ -178,8 +178,8 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `source` _[CatalogSource](#catalogsource)_ | source is a required field that defines the source of a catalog.
A catalog contains information on content that can be installed on a cluster.
The catalog source makes catalog contents discoverable and usable by other on-cluster components.
These components can present the content in a GUI dashboard or install content from the catalog on the cluster.
The catalog source must contain catalog metadata in the File-Based Catalog (FBC) format.
For more information on FBC, see https://olm.operatorframework.io/docs/reference/file-based-catalogs/#docs.
Below is a minimal example of a ClusterCatalogSpec that sources a catalog from an image:
source:
type: Image
image:
ref: quay.io/operatorhubio/catalog:latest | | Required: \{\}
|
-| `priority` _integer_ | priority is an optional field that defines a priority for this ClusterCatalog.
Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
Higher numbers mean higher priority.
Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
Clients should prompt users for additional input to break the tie.
When omitted, the default priority is 0.
Use negative numbers to specify a priority lower than the default.
Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647. | 0 | Maximum: 2.147483647e+09
Minimum: -2.147483648e+09
|
-| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
Clients should consider this ClusterCatalog and its contents as usable.
When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
Treat this the same as if the ClusterCatalog does not exist.
Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. | Available | Enum: [Unavailable Available]
|
+| `priority` _integer_ | priority is an optional field that defines a priority for this ClusterCatalog.
Clients use the ClusterCatalog priority as a tie-breaker between ClusterCatalogs that meet their requirements.
Higher numbers mean higher priority.
Clients decide how to handle scenarios where multiple ClusterCatalogs with the same priority meet their requirements.
Clients should prompt users for additional input to break the tie.
When omitted, the default priority is 0.
Use negative numbers to specify a priority lower than the default.
Use positive numbers to specify a priority higher than the default.
The lowest possible value is -2147483648.
The highest possible value is 2147483647. | 0 | Maximum: 2.147483647e+09
Minimum: -2.147483648e+09
Optional: \{\}
|
+| `availabilityMode` _[AvailabilityMode](#availabilitymode)_ | availabilityMode is an optional field that defines how the ClusterCatalog is made available to clients on the cluster.
Allowed values are "Available", "Unavailable", or omitted.
When omitted, the default value is "Available".
When set to "Available", the catalog contents are unpacked and served over the catalog content HTTP server.
Clients should consider this ClusterCatalog and its contents as usable.
When set to "Unavailable", the catalog contents are no longer served over the catalog content HTTP server.
Treat this the same as if the ClusterCatalog does not exist.
Use "Unavailable" when you want to keep the ClusterCatalog but treat it as if it doesn't exist. | Available | Enum: [Unavailable Available]
Optional: \{\}
|
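
As a concrete illustration of the spec fields above (not part of this patch), a minimal ClusterCatalog that sources an image, keeps the default priority, and is served to clients. The catalog name is hypothetical; the image reference is taken from the inline example in the source description.

```yaml
apiVersion: olm.operatorframework.io/v1
kind: ClusterCatalog
metadata:
  name: operatorhubio            # hypothetical catalog name
spec:
  source:
    type: Image
    image:
      ref: quay.io/operatorhubio/catalog:latest
  priority: 0                    # default; higher numbers win tie-breaks between catalogs
  availabilityMode: Available    # default; set Unavailable to stop serving the contents
```
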
#### ClusterCatalogStatus
@@ -195,10 +195,10 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
- When status is True and reason is Available, the catalog contents are being served.
- When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
- When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
- When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
- When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously:
- The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
- The Progressing condition is True with reason Retrying because the system is working to serve the new version. | | |
-| `resolvedSource` _[ResolvedCatalogSource](#resolvedcatalogsource)_ | resolvedSource contains information about the resolved source based on the source type. | | |
-| `urls` _[ClusterCatalogURLs](#clustercatalogurls)_ | urls contains the URLs that can be used to access the catalog. | | |
-| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the catalog contents were extracted from their source format.
For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
This extraction from the source format is called "unpacking". | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of this ClusterCatalog.
The current condition types are Serving and Progressing.
The Serving condition represents whether the catalog contents are being served via the HTTP(S) web server:
- When status is True and reason is Available, the catalog contents are being served.
- When status is False and reason is Unavailable, the catalog contents are not being served because the contents are not yet available.
- When status is False and reason is UserSpecifiedUnavailable, the catalog contents are not being served because the catalog has been intentionally marked as unavailable.
The Progressing condition represents whether the ClusterCatalog is progressing or is ready to progress towards a new state:
- When status is True and reason is Retrying, an error occurred that may be resolved on subsequent reconciliation attempts.
- When status is True and reason is Succeeded, the ClusterCatalog has successfully progressed to a new state and is ready to continue progressing.
- When status is False and reason is Blocked, an error occurred that requires manual intervention for recovery.
If the system initially fetched contents and polling identifies updates, both conditions can be active simultaneously:
- The Serving condition remains True with reason Available because the previous contents are still served via the HTTP(S) web server.
- The Progressing condition is True with reason Retrying because the system is working to serve the new version. | | Optional: \{\}
|
+| `resolvedSource` _[ResolvedCatalogSource](#resolvedcatalogsource)_ | resolvedSource contains information about the resolved source based on the source type. | | Optional: \{\}
|
+| `urls` _[ClusterCatalogURLs](#clustercatalogurls)_ | urls contains the URLs that can be used to access the catalog. | | Optional: \{\}
|
+| `lastUnpacked` _[Time](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#time-v1-meta)_ | lastUnpacked represents the last time the catalog contents were extracted from their source format.
For example, when using an Image source, the OCI image is pulled and image layers are written to a file-system backed cache.
This extraction from the source format is called "unpacking". | | Optional: \{\}
|
#### ClusterCatalogURLs
@@ -232,11 +232,11 @@ _Appears in:_
| --- | --- | --- | --- |
| `apiVersion` _string_ | `olm.operatorframework.io/v1` | | |
| `kind` _string_ | `ClusterExtension` | | |
-| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
-| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
-| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
-| `spec` _[ClusterExtensionSpec](#clusterextensionspec)_ | spec is an optional field that defines the desired state of the ClusterExtension. | | |
-| `status` _[ClusterExtensionStatus](#clusterextensionstatus)_ | status is an optional field that defines the observed state of the ClusterExtension. | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | Optional: \{\}
|
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | Optional: \{\}
|
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | Optional: \{\}
|
+| `spec` _[ClusterExtensionSpec](#clusterextensionspec)_ | spec is an optional field that defines the desired state of the ClusterExtension. | | Optional: \{\}
|
+| `status` _[ClusterExtensionStatus](#clusterextensionstatus)_ | status is an optional field that defines the observed state of the ClusterExtension. | | Optional: \{\}
|
#### ClusterExtensionConfig
@@ -254,7 +254,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `configType` _[ClusterExtensionConfigType](#clusterextensionconfigtype)_ | configType is required and specifies the type of configuration source.
The only allowed value is "Inline".
When set to "Inline", the cluster extension configuration is defined inline within the ClusterExtension resource. | | Enum: [Inline]
Required: \{\}
|
-| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the ClusterExtension.
It is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle. | | MinProperties: 1
Type: object
|
+| `inline` _[JSON](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#json-v1-apiextensions-k8s-io)_ | inline contains JSON or YAML values specified directly in the ClusterExtension.
It is used to specify arbitrary configuration values for the ClusterExtension.
It must be set if configType is 'Inline' and must be a valid JSON/YAML object containing at least one property.
The configuration values are validated at runtime against a JSON schema provided by the bundle. | | MinProperties: 1
Type: object
Optional: \{\}
|
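
To illustrate the Inline configuration shape described above (not part of this patch), a hedged fragment of a ClusterExtension spec. The property under inline is hypothetical, since the accepted keys come from the schema provided by the resolved bundle.

```yaml
# Fragment nested under a ClusterExtension's spec (assumed placement: spec.config).
config:
  configType: Inline
  inline:
    # At least one property is required (MinProperties: 1); "replicas" is a
    # made-up key, valid only if the bundle's configuration schema defines it.
    replicas: 2
```
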
#### ClusterExtensionConfigType
@@ -287,7 +287,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is optional and configures the checks that run before installation or upgrade
of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
When not specified, the default configuration is used. | | |
+| `preflight` _[PreflightConfig](#preflightconfig)_ | preflight is optional and configures the checks that run before installation or upgrade
of the content for the package specified in the packageName field.
When specified, it replaces the default preflight configuration for install/upgrade actions.
When not specified, the default configuration is used. | | Optional: \{\}
|
#### ClusterExtensionInstallStatus
@@ -320,9 +320,9 @@ ClusterExtensionList contains a list of ClusterExtension
| --- | --- | --- | --- |
| `apiVersion` _string_ | `olm.operatorframework.io/v1` | | |
| `kind` _string_ | `ClusterExtensionList` | | |
-| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
-| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
-| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents.
Servers may infer this from the endpoint the client submits requests to.
Cannot be updated.
In CamelCase.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | Optional: \{\}
|
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object.
Servers should convert recognized schemas to the latest internal value, and
may reject unrecognized values.
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | Optional: \{\}
|
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | Optional: \{\}
|
| `items` _[ClusterExtension](#clusterextension) array_ | items is a required list of ClusterExtension objects. | | Required: \{\}
|
@@ -342,9 +342,9 @@ _Appears in:_
| `namespace` _string_ | namespace specifies a Kubernetes namespace.
This is the namespace where the provided ServiceAccount must exist.
It also designates the default namespace where namespace-scoped resources for the extension are applied to the cluster.
Some extensions may contain namespace-scoped resources to be applied in other namespaces.
This namespace must exist.
The namespace field is required, immutable, and follows the DNS label standard as defined in [RFC 1123].
It must contain only lowercase alphanumeric characters or hyphens (-), start and end with an alphanumeric character,
and be no longer than 63 characters.
[RFC 1123]: https://tools.ietf.org/html/rfc1123 | | MaxLength: 63
Required: \{\}
|
| `serviceAccount` _[ServiceAccountReference](#serviceaccountreference)_ | serviceAccount specifies a ServiceAccount used to perform all interactions with the cluster
that are required to manage the extension.
The ServiceAccount must be configured with the necessary permissions to perform these interactions.
The ServiceAccount must exist in the namespace referenced in the spec.
The serviceAccount field is required. | | Required: \{\}
|
| `source` _[SourceConfig](#sourceconfig)_ | source is required and selects the installation source of content for this ClusterExtension.
Set the sourceType field to perform the selection.
Catalog is currently the only implemented sourceType.
Setting sourceType to "Catalog" requires the catalog field to also be defined.
Below is a minimal example of a source definition (in yaml):
source:
sourceType: Catalog
catalog:
packageName: example-package | | Required: \{\}
|
-| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is optional and configures installation options for the ClusterExtension,
such as the pre-flight check configuration. | | |
-| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is optional and specifies bundle-specific configuration.
Configuration is bundle-specific and a bundle may provide a configuration schema.
When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema the bundle is deemed to not be configurable. More information on how
to configure bundles can be found in the OLM documentation associated with your current OLM version. | | |
-| `progressDeadlineMinutes` _integer_ | progressDeadlineMinutes is an optional field that defines the maximum period
of time in minutes after which an installation should be considered failed and
require manual intervention. This functionality is disabled when no value
is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours).
| | Maximum: 720
Minimum: 10
|
+| `install` _[ClusterExtensionInstallConfig](#clusterextensioninstallconfig)_ | install is optional and configures installation options for the ClusterExtension,
such as the pre-flight check configuration. | | Optional: \{\}
|
+| `config` _[ClusterExtensionConfig](#clusterextensionconfig)_ | config is optional and specifies bundle-specific configuration.
Configuration is bundle-specific and a bundle may provide a configuration schema.
When not specified, the default configuration of the resolved bundle is used.
config is validated against a configuration schema provided by the resolved bundle. If the bundle does not provide
a configuration schema, the bundle is deemed not to be configurable. More information on how
to configure bundles can be found in the OLM documentation associated with your current OLM version. | | Optional: \{\}
|
+| `progressDeadlineMinutes` _integer_ | progressDeadlineMinutes is an optional field that defines the maximum period
of time in minutes after which an installation should be considered failed and
require manual intervention. This functionality is disabled when no value
is provided. The minimum period is 10 minutes, and the maximum is 720 minutes (12 hours).
| | Maximum: 720
Minimum: 10
Optional: \{\}
|
#### ClusterExtensionStatus
@@ -360,9 +360,9 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of the ClusterExtension.
The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether the bundle has been installed for this ClusterExtension:
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
When Progressing is True and the Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active rollout.
When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata.
These are indications from a package owner to guide users away from a particular package, channel, or bundle:
- BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable.
- ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable.
- PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable.
- Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. | | |
-| `install` _[ClusterExtensionInstallStatus](#clusterextensioninstallstatus)_ | install is a representation of the current installation status for this ClusterExtension. | | |
-| `activeRevisions` _[RevisionStatus](#revisionstatus) array_ | activeRevisions holds a list of currently active (non-archived) ClusterExtensionRevisions,
including both installed and rolling out revisions.
| | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions represents the current state of the ClusterExtension.
The set of condition types which apply to all spec.source variations are Installed and Progressing.
The Installed condition represents whether the bundle has been installed for this ClusterExtension:
- When Installed is True and the Reason is Succeeded, the bundle has been successfully installed.
- When Installed is False and the Reason is Failed, the bundle has failed to install.
The Progressing condition represents whether or not the ClusterExtension is advancing towards a new state.
When Progressing is True and the Reason is Succeeded, the ClusterExtension is making progress towards a new state.
When Progressing is True and the Reason is Retrying, the ClusterExtension has encountered an error that could be resolved on subsequent reconciliation attempts.
When Progressing is False and the Reason is Blocked, the ClusterExtension has encountered an error that requires manual intervention for recovery.
When Progressing is True and the Reason is RollingOut, the ClusterExtension has one or more ClusterExtensionRevisions in active rollout.
When the ClusterExtension is sourced from a catalog, it surfaces deprecation conditions based on catalog metadata.
These are indications from a package owner to guide users away from a particular package, channel, or bundle:
- BundleDeprecated is True if the installed bundle is marked deprecated, False if not deprecated, or Unknown if no bundle is installed yet or if catalog data is unavailable.
- ChannelDeprecated is True if any requested channel is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable.
- PackageDeprecated is True if the requested package is marked deprecated, False if not deprecated, or Unknown if catalog data is unavailable.
- Deprecated is a rollup condition that is True when any deprecation exists, False when none exist, or Unknown when catalog data is unavailable. | | Optional: \{\}
|
+| `install` _[ClusterExtensionInstallStatus](#clusterextensioninstallstatus)_ | install is a representation of the current installation status for this ClusterExtension. | | Optional: \{\}
|
+| `activeRevisions` _[RevisionStatus](#revisionstatus) array_ | activeRevisions holds a list of currently active (non-archived) ClusterExtensionRevisions,
including both installed and rolling out revisions.
| | Optional: \{\}
|
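A hedged sketch (not part of the patch) of reading these conditions from Go, assuming the ClusterExtension type exposes them as Status.Conditions ([]metav1.Condition, per the table above) and that the API package path/alias shown is correct; it relies only on apimachinery's standard condition helpers:

// Example only: inspecting the Installed and Progressing conditions described above.
// The ocv1 import path/alias is an assumption for illustration.
package example

import (
	"k8s.io/apimachinery/pkg/api/meta"

	ocv1 "github.com/operator-framework/operator-controller/api/v1"
)

// isInstalled reports whether the bundle has been installed (Installed=True).
func isInstalled(ext *ocv1.ClusterExtension) bool {
	return meta.IsStatusConditionTrue(ext.Status.Conditions, "Installed")
}

// progressingReason returns the Reason of the Progressing condition,
// e.g. Succeeded, Retrying, Blocked, or RollingOut as listed in the table above.
func progressingReason(ext *ocv1.ClusterExtension) string {
	if c := meta.FindStatusCondition(ext.Status.Conditions, "Progressing"); c != nil {
		return c.Reason
	}
	return ""
}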
@@ -384,7 +384,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `ref` _string_ | ref is a required field that defines the reference to a container image containing catalog contents.
It cannot be more than 1000 characters.
A reference has 3 parts: the domain, name, and identifier.
The domain is typically the registry where an image is located.
It must be alphanumeric characters (lowercase and uppercase) separated by the "." character.
Hyphenation is allowed, but the domain must start and end with alphanumeric characters.
Specifying a port to use is also allowed by adding the ":" character followed by numeric values.
The port must be the last value in the domain.
Some examples of valid domain values are "registry.mydomain.io", "quay.io", "my-registry.io:8080".
The name is typically the repository in the registry where an image is located.
It must contain lowercase alphanumeric characters separated only by the ".", "_", "__", "-" characters.
Multiple names can be concatenated with the "/" character.
The domain and name are combined using the "/" character.
Some examples of valid name values are "operatorhubio/catalog", "catalog", "my-catalog.prod".
An example of the domain and name parts of a reference being combined is "quay.io/operatorhubio/catalog".
The identifier is typically the tag or digest for an image reference and is present at the end of the reference.
It starts with a separator character used to distinguish the end of the name and beginning of the identifier.
For a digest-based reference, the "@" character is the separator.
For a tag-based reference, the ":" character is the separator.
An identifier is required in the reference.
Digest-based references must contain an algorithm reference immediately after the "@" separator.
The algorithm reference must be followed by the ":" character and an encoded string.
The algorithm must start with an uppercase or lowercase alpha character followed by alphanumeric characters and may contain the "-", "_", "+", and "." characters.
Some examples of valid algorithm values are "sha256", "sha256+b64u", "multihash+base58".
The encoded string following the algorithm must be hex digits (a-f, A-F, 0-9) and must be a minimum of 32 characters.
Tag-based references must begin with a word character (alphanumeric + "_") followed by word characters or ".", and "-" characters.
The tag must not be longer than 127 characters.
An example of a valid digest-based image reference is "quay.io/operatorhubio/catalog@sha256:200d4ddb2a73594b91358fe6397424e975205bfbe44614f5846033cad64b3f05"
An example of a valid tag-based image reference is "quay.io/operatorhubio/catalog:latest" | | MaxLength: 1000
Required: \{\}
|
-| `pollIntervalMinutes` _integer_ | pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
You cannot specify pollIntervalMinutes when ref is a digest-based reference.
When omitted, the image is not polled for new content. | | Minimum: 1
|
+| `pollIntervalMinutes` _integer_ | pollIntervalMinutes is an optional field that sets the interval, in minutes, at which the image source is polled for new content.
You cannot specify pollIntervalMinutes when ref is a digest-based reference.
When omitted, the image is not polled for new content. | | Minimum: 1
Optional: \{\}
|
#### PreflightConfig
@@ -451,7 +451,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `name` _string_ | name of the ClusterExtensionRevision resource | | |
-| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions optionally expose the Progressing and Available conditions of the revision
when it has not yet been marked as successfully installed (the Succeeded condition is not set to True).
Given that a ClusterExtension should remain available during upgrades, an observer may use these conditions
to gain more insight into the reasons for its current state. | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.31/#condition-v1-meta) array_ | conditions optionally expose the Progressing and Available conditions of the revision
when it has not yet been marked as successfully installed (the Succeeded condition is not set to True).
Given that a ClusterExtension should remain available during upgrades, an observer may use these conditions
to gain more insight into the reasons for its current state. | | Optional: \{\}
|
#### ServiceAccountReference
@@ -484,7 +484,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
| `sourceType` _string_ | sourceType is required and specifies the type of install source.
The only allowed value is "Catalog".
When set to "Catalog", information for determining the appropriate bundle of content to install
is fetched from ClusterCatalog resources on the cluster.
When using the Catalog sourceType, the catalog field must also be set. | | Enum: [Catalog]
Required: \{\}
|
-| `catalog` _[CatalogFilter](#catalogfilter)_ | catalog configures how information is sourced from a catalog.
It is required when sourceType is "Catalog", and forbidden otherwise. | | |
+| `catalog` _[CatalogFilter](#catalogfilter)_ | catalog configures how information is sourced from a catalog.
It is required when sourceType is "Catalog", and forbidden otherwise. | | Optional: \{\}
|
#### SourceType
diff --git a/go.mod b/go.mod
index 9cb05349a..3177a7a72 100644
--- a/go.mod
+++ b/go.mod
@@ -6,7 +6,7 @@ require (
github.com/BurntSushi/toml v1.6.0
github.com/Masterminds/semver/v3 v3.4.0
github.com/blang/semver/v4 v4.0.0
- github.com/cert-manager/cert-manager v1.19.2
+ github.com/cert-manager/cert-manager v1.19.3
github.com/containerd/containerd v1.7.30
github.com/cucumber/godog v0.15.1
github.com/evanphx/json-patch v5.9.11+incompatible
@@ -45,7 +45,7 @@ require (
k8s.io/klog/v2 v2.130.1
k8s.io/kubernetes v1.34.0
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4
- pkg.package-operator.run/boxcutter v0.8.0
+ pkg.package-operator.run/boxcutter v0.8.1
sigs.k8s.io/controller-runtime v0.22.4
sigs.k8s.io/controller-tools v0.20.0
sigs.k8s.io/crdify v0.5.0
@@ -113,8 +113,8 @@ require (
github.com/go-gorp/gorp/v3 v3.1.0 // indirect
github.com/go-jose/go-jose/v4 v4.1.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/jsonpointer v0.22.3 // indirect
- github.com/go-openapi/jsonreference v0.21.3 // indirect
+ github.com/go-openapi/jsonpointer v0.22.4 // indirect
+ github.com/go-openapi/jsonreference v0.21.4 // indirect
github.com/go-openapi/swag v0.25.4 // indirect
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
github.com/go-openapi/swag/conv v0.25.4 // indirect
@@ -220,12 +220,12 @@ require (
go.opencensus.io v0.24.0 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
- go.opentelemetry.io/otel v1.38.0 // indirect
+ go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
- go.opentelemetry.io/otel/trace v1.38.0 // indirect
+ go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.podman.io/common v0.66.0 // indirect
go.podman.io/storage v1.61.0 // indirect
diff --git a/go.sum b/go.sum
index b67a2a0ae..c50ca4c1b 100644
--- a/go.sum
+++ b/go.sum
@@ -47,8 +47,8 @@ github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR
github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
-github.com/cert-manager/cert-manager v1.19.2 h1:jSprN1h5pgNDSl7HClAmIzXuTxic/5FXJ32kbQHqjlM=
-github.com/cert-manager/cert-manager v1.19.2/go.mod h1:e9NzLtOKxTw7y99qLyWGmPo6mrC1Nh0EKKcMkRfK+GE=
+github.com/cert-manager/cert-manager v1.19.3 h1:3d0Nk/HO3BOmAdBJNaBh+6YgaO3Ciey3xCpOjiX5Obs=
+github.com/cert-manager/cert-manager v1.19.3/go.mod h1:e9NzLtOKxTw7y99qLyWGmPo6mrC1Nh0EKKcMkRfK+GE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chai2010/gettext-go v1.0.2 h1:1Lwwip6Q2QGsAdl/ZKPCwTe9fe0CjlUbqj5bFNSjIRk=
@@ -172,10 +172,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/jsonpointer v0.22.3 h1:dKMwfV4fmt6Ah90zloTbUKWMD+0he+12XYAsPotrkn8=
-github.com/go-openapi/jsonpointer v0.22.3/go.mod h1:0lBbqeRsQ5lIanv3LHZBrmRGHLHcQoOXQnf88fHlGWo=
-github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
-github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
+github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
+github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
+github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
+github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
@@ -539,8 +539,8 @@ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.6
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.63.0/go.mod h1:fvPi2qXDqFs8M4B4fmJhE92TyQs9Ydjlg3RvfUp+NbQ=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
+go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2 h1:06ZeJRe5BnYXceSM9Vya83XXVaNGe3H1QqsvqRANQq8=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.12.2/go.mod h1:DvPtKE63knkDVP88qpatBj81JxN+w1bqfVbsbCbj1WY=
go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.12.2 h1:tPLwQlXbJ8NSOfZc4OkgU5h2A38M4c9kfHSVc4PFQGs=
@@ -565,16 +565,16 @@ go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0 h1:G8Xec/SgZQricwW
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.36.0/go.mod h1:PD57idA/AiFD5aqoxGxCvT/ILJPeHy3MjqU/NS7KogY=
go.opentelemetry.io/otel/log v0.12.2 h1:yob9JVHn2ZY24byZeaXpTVoPS6l+UrrxmxmPKohXTwc=
go.opentelemetry.io/otel/log v0.12.2/go.mod h1:ShIItIxSYxufUMt+1H5a2wbckGli3/iCfuEbVZi/98E=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
+go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/log v0.12.2 h1:yNoETvTByVKi7wHvYS6HMcZrN5hFLD7I++1xIZ/k6W0=
go.opentelemetry.io/otel/sdk/log v0.12.2/go.mod h1:DcpdmUXHJgSqN/dh+XMWa7Vf89u9ap0/AAk/XGLnEzY=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
+go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.podman.io/common v0.66.0 h1:KElE3HKLFdMdJL+jv5ExBiX2Dh4Qcv8ovmzaBGRsyZM=
@@ -810,8 +810,8 @@ k8s.io/utils v0.0.0-20251002143259-bc988d571ff4 h1:SjGebBtkBqHFOli+05xYbK8YF1Dzk
k8s.io/utils v0.0.0-20251002143259-bc988d571ff4/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
oras.land/oras-go/v2 v2.6.0 h1:X4ELRsiGkrbeox69+9tzTu492FMUu7zJQW6eJU+I2oc=
oras.land/oras-go/v2 v2.6.0/go.mod h1:magiQDfG6H1O9APp+rOsvCPcW1GD2MM7vgnKY0Y+u1o=
-pkg.package-operator.run/boxcutter v0.8.0 h1:lHKUey0jtpuboRup0LmToseJQNVFF00KtzhwG5vyJgk=
-pkg.package-operator.run/boxcutter v0.8.0/go.mod h1:BWraKaCa8V08MbFRMXrmJdb4A8/Ytc90vAJkExVUJqM=
+pkg.package-operator.run/boxcutter v0.8.1 h1:GoSXwI20s4WvFkOXq7xNlINzWmODBlA+2VMY7UflPFA=
+pkg.package-operator.run/boxcutter v0.8.1/go.mod h1:nk6XIcTS3i8WV1+GpaFwjPcNM+tK0jHczoa0qGnbUbk=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0 h1:qPrZsv1cwQiFeieFlRqT627fVZ+tyfou/+S5S0H5ua0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.33.0/go.mod h1:Ve9uj1L+deCXFrPOk1LpFXqTg7LCFzFso6PA48q/XZw=
sigs.k8s.io/controller-runtime v0.22.4 h1:GEjV7KV3TY8e+tJ2LCTxUTanW4z/FmNB7l327UfMq9A=
diff --git a/internal/operator-controller/config/config.go b/internal/operator-controller/config/config.go
index 30f30951c..afb89dff5 100644
--- a/internal/operator-controller/config/config.go
+++ b/internal/operator-controller/config/config.go
@@ -33,6 +33,8 @@ import (
"github.com/santhosh-tekuri/jsonschema/v6"
"github.com/santhosh-tekuri/jsonschema/v6/kind"
"sigs.k8s.io/yaml"
+
+ "github.com/operator-framework/api/pkg/operators/v1alpha1"
)
const (
@@ -48,6 +50,10 @@ const (
FormatSingleNamespaceInstallMode = "singleNamespaceInstallMode"
)
+// DeploymentConfig is a type alias for v1alpha1.SubscriptionConfig
+// to maintain clear naming in the OLMv1 context while reusing the v0 type.
+type DeploymentConfig = v1alpha1.SubscriptionConfig
+
// SchemaProvider lets each package format type describe what configuration it accepts.
//
// Different package format types provide schemas in different ways:
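A minimal sketch (not part of the patch) of populating the new alias, in a hypothetical example package inside this module; the fields used here (Env, Resources, NodeSelector, Annotations) are exactly the ones consumed by the generator changes below, and all concrete values are illustrative:

// Example only: building a DeploymentConfig value.
// The field set comes from v1alpha1.SubscriptionConfig, which the alias reuses.
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"

	"github.com/operator-framework/operator-controller/internal/operator-controller/config"
)

func sampleDeploymentConfig() *config.DeploymentConfig {
	return &config.DeploymentConfig{
		// Env entries override existing container variables with the same name.
		Env: []corev1.EnvVar{{Name: "HTTPS_PROXY", Value: "https://proxy.example.com:3128"}},
		// NodeSelector and Resources replace the existing values wholesale.
		NodeSelector: map[string]string{"kubernetes.io/os": "linux"},
		Resources: &corev1.ResourceRequirements{
			Requests: corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("100m")},
		},
		// Annotations are merged; existing deployment/pod annotations win on conflict.
		Annotations: map[string]string{"example.com/owned-by": "platform-team"},
	}
}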
diff --git a/internal/operator-controller/rukpak/render/registryv1/generators/generators.go b/internal/operator-controller/rukpak/render/registryv1/generators/generators.go
index 7d5d435ea..8f45bb762 100644
--- a/internal/operator-controller/rukpak/render/registryv1/generators/generators.go
+++ b/internal/operator-controller/rukpak/render/registryv1/generators/generators.go
@@ -3,6 +3,7 @@ package generators
import (
"cmp"
"fmt"
+ "reflect"
"slices"
"strconv"
"strings"
@@ -21,6 +22,7 @@ import (
"github.com/operator-framework/api/pkg/operators/v1alpha1"
registrybundle "github.com/operator-framework/operator-registry/pkg/lib/bundle"
+ "github.com/operator-framework/operator-controller/internal/operator-controller/config"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/util"
@@ -98,6 +100,9 @@ func BundleCSVDeploymentGenerator(rv1 *bundle.RegistryV1, opts render.Options) (
ensureCorrectDeploymentCertVolumes(deploymentResource, *secretInfo)
}
+ // Apply deployment configuration if provided
+ applyCustomConfigToDeployment(deploymentResource, opts.DeploymentConfig)
+
objs = append(objs, deploymentResource)
}
return objs, nil
@@ -578,3 +583,214 @@ func getWebhookNamespaceSelector(targetNamespaces []string) *metav1.LabelSelecto
}
return nil
}
+
+// applyCustomConfigToDeployment applies the deployment configuration to all containers in the deployment.
+// It follows OLMv0 behavior for applying configuration to deployments.
+// See https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go
+func applyCustomConfigToDeployment(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if config == nil {
+ return
+ }
+
+ // Apply all configuration modifications following OLMv0 behavior
+ applyEnvironmentConfig(deployment, config)
+ applyEnvironmentFromConfig(deployment, config)
+ applyVolumeConfig(deployment, config)
+ applyVolumeMountConfig(deployment, config)
+ applyTolerationsConfig(deployment, config)
+ applyResourcesConfig(deployment, config)
+ applyNodeSelectorConfig(deployment, config)
+ applyAffinityConfig(deployment, config)
+ applyAnnotationsConfig(deployment, config)
+}
+
+// applyEnvironmentConfig applies environment variables to all containers in the deployment.
+// Environment variables from config override existing environment variables with the same name.
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L11-L27
+func applyEnvironmentConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if len(config.Env) == 0 {
+ return
+ }
+
+ for i := range deployment.Spec.Template.Spec.Containers {
+ container := &deployment.Spec.Template.Spec.Containers[i]
+
+ // Create a map to track existing env var names for override behavior
+ existingEnvMap := make(map[string]int)
+ for idx, env := range container.Env {
+ existingEnvMap[env.Name] = idx
+ }
+
+ // Apply config env vars, overriding existing ones with same name
+ for _, configEnv := range config.Env {
+ if existingIdx, exists := existingEnvMap[configEnv.Name]; exists {
+ // Override existing env var
+ container.Env[existingIdx] = configEnv
+ } else {
+ // Append new env var
+ container.Env = append(container.Env, configEnv)
+ }
+ }
+ }
+}
+
+// applyEnvironmentFromConfig appends EnvFrom sources to all containers in the deployment.
+// Duplicate EnvFrom sources are not added.
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L65-L81
+func applyEnvironmentFromConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if len(config.EnvFrom) == 0 {
+ return
+ }
+
+ for i := range deployment.Spec.Template.Spec.Containers {
+ container := &deployment.Spec.Template.Spec.Containers[i]
+
+ // Check for duplicates before appending
+ for _, configEnvFrom := range config.EnvFrom {
+ isDuplicate := false
+ for _, existingEnvFrom := range container.EnvFrom {
+ if reflect.DeepEqual(existingEnvFrom, configEnvFrom) {
+ isDuplicate = true
+ break
+ }
+ }
+ if !isDuplicate {
+ container.EnvFrom = append(container.EnvFrom, configEnvFrom)
+ }
+ }
+ }
+}
+
+// applyVolumeConfig appends volumes to the deployment's pod spec.
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L104-L117
+func applyVolumeConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if len(config.Volumes) == 0 {
+ return
+ }
+
+ deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, config.Volumes...)
+}
+
+// applyVolumeMountConfig appends volume mounts to all containers in the deployment.
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L149-L165
+func applyVolumeMountConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if len(config.VolumeMounts) == 0 {
+ return
+ }
+
+ for i := range deployment.Spec.Template.Spec.Containers {
+ container := &deployment.Spec.Template.Spec.Containers[i]
+ container.VolumeMounts = append(container.VolumeMounts, config.VolumeMounts...)
+ }
+}
+
+// applyTolerationsConfig appends tolerations to the deployment's pod spec.
+// Duplicate tolerations are not added.
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L197-L209
+func applyTolerationsConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if len(config.Tolerations) == 0 {
+ return
+ }
+
+ // Check for duplicates before appending
+ for _, configToleration := range config.Tolerations {
+ isDuplicate := false
+ for _, existingToleration := range deployment.Spec.Template.Spec.Tolerations {
+ if reflect.DeepEqual(existingToleration, configToleration) {
+ isDuplicate = true
+ break
+ }
+ }
+ if !isDuplicate {
+ deployment.Spec.Template.Spec.Tolerations = append(deployment.Spec.Template.Spec.Tolerations, configToleration)
+ }
+ }
+}
+
+// applyResourcesConfig applies resource requirements to all containers in the deployment.
+// This completely replaces existing resource requirements.
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L236-L255
+func applyResourcesConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if config.Resources == nil {
+ return
+ }
+
+ for i := range deployment.Spec.Template.Spec.Containers {
+ container := &deployment.Spec.Template.Spec.Containers[i]
+ container.Resources = *config.Resources
+ }
+}
+
+// applyNodeSelectorConfig applies node selector to the deployment's pod spec.
+// This completely replaces existing node selector.
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L257-L271
+func applyNodeSelectorConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if config.NodeSelector == nil {
+ return
+ }
+
+ deployment.Spec.Template.Spec.NodeSelector = config.NodeSelector
+}
+
+// applyAffinityConfig applies affinity configuration to the deployment's pod spec.
+// This selectively overrides non-nil affinity sub-attributes.
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L273-L341
+func applyAffinityConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if config.Affinity == nil {
+ return
+ }
+
+ if deployment.Spec.Template.Spec.Affinity == nil {
+ deployment.Spec.Template.Spec.Affinity = &corev1.Affinity{}
+ }
+
+ if config.Affinity.NodeAffinity != nil {
+ deployment.Spec.Template.Spec.Affinity.NodeAffinity = config.Affinity.NodeAffinity
+ }
+
+ if config.Affinity.PodAffinity != nil {
+ deployment.Spec.Template.Spec.Affinity.PodAffinity = config.Affinity.PodAffinity
+ }
+
+ if config.Affinity.PodAntiAffinity != nil {
+ deployment.Spec.Template.Spec.Affinity.PodAntiAffinity = config.Affinity.PodAntiAffinity
+ }
+}
+
+// applyAnnotationsConfig applies annotations to the deployment and its pod template.
+// Existing deployment and pod annotations take precedence over config annotations (no override).
+// This follows OLMv0 behavior:
+// https://github.com/operator-framework/operator-lifecycle-manager/blob/v0.39.0/pkg/controller/operators/olm/overrides/inject/inject.go#L343-L378
+func applyAnnotationsConfig(deployment *appsv1.Deployment, config *config.DeploymentConfig) {
+ if len(config.Annotations) == 0 {
+ return
+ }
+
+ // Apply to deployment metadata
+ if deployment.Annotations == nil {
+ deployment.Annotations = make(map[string]string)
+ }
+ for key, value := range config.Annotations {
+ if _, exists := deployment.Annotations[key]; !exists {
+ deployment.Annotations[key] = value
+ }
+ }
+
+ // Apply to pod template metadata
+ if deployment.Spec.Template.Annotations == nil {
+ deployment.Spec.Template.Annotations = make(map[string]string)
+ }
+ for key, value := range config.Annotations {
+ if _, exists := deployment.Spec.Template.Annotations[key]; !exists {
+ deployment.Spec.Template.Annotations[key] = value
+ }
+ }
+}
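A hedged usage sketch (not part of the patch): callers hand the configuration to the exported generator through render.Options rather than calling the unexported apply helpers directly; the namespace names and toleration values are placeholders, mirroring what the test cases in generators_test.go exercise:

// Example only: rendering CSV deployments with a DeploymentConfig applied.
package example

import (
	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/operator-framework/operator-controller/internal/operator-controller/config"
	"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle"
	"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render"
	"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render/registryv1/generators"
)

// renderDeployments renders the bundle's CSV deployments with an extra toleration appended.
func renderDeployments(rv1 *bundle.RegistryV1) ([]client.Object, error) {
	return generators.BundleCSVDeploymentGenerator(rv1, render.Options{
		// Placeholder namespaces for illustration.
		InstallNamespace: "operators",
		TargetNamespaces: []string{"operators"},
		DeploymentConfig: &config.DeploymentConfig{
			Tolerations: []corev1.Toleration{{
				Key:      "dedicated",
				Operator: corev1.TolerationOpEqual,
				Value:    "operators",
				Effect:   corev1.TaintEffectNoSchedule,
			}},
		},
	})
}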
diff --git a/internal/operator-controller/rukpak/render/registryv1/generators/generators_test.go b/internal/operator-controller/rukpak/render/registryv1/generators/generators_test.go
index 59be3c6df..22ce6d28b 100644
--- a/internal/operator-controller/rukpak/render/registryv1/generators/generators_test.go
+++ b/internal/operator-controller/rukpak/render/registryv1/generators/generators_test.go
@@ -12,6 +12,7 @@ import (
corev1 "k8s.io/api/core/v1"
rbacv1 "k8s.io/api/rbac/v1"
apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
+ "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -20,6 +21,7 @@ import (
"github.com/operator-framework/api/pkg/operators/v1alpha1"
+ "github.com/operator-framework/operator-controller/internal/operator-controller/config"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render/registryv1/generators"
@@ -2508,3 +2510,586 @@ func Test_CertProviderResourceGenerator_Succeeds(t *testing.T) {
}),
}, objs)
}
+
+func Test_BundleCSVDeploymentGenerator_WithDeploymentConfig(t *testing.T) {
+ for _, tc := range []struct {
+ name string
+ bundle *bundle.RegistryV1
+ opts render.Options
+ verify func(*testing.T, []client.Object)
+ }{
+ {
+ name: "applies env vars from deployment config",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "manager",
+ Env: []corev1.EnvVar{
+ {Name: "EXISTING_VAR", Value: "existing_value"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ Env: []corev1.EnvVar{
+ {Name: "NEW_VAR", Value: "new_value"},
+ {Name: "EXISTING_VAR", Value: "overridden_value"},
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+ require.Len(t, dep.Spec.Template.Spec.Containers, 1)
+ envVars := dep.Spec.Template.Spec.Containers[0].Env
+
+ // Should have both vars
+ require.Len(t, envVars, 2)
+
+ // Existing var should be overridden
+ var existingVar *corev1.EnvVar
+ for i := range envVars {
+ if envVars[i].Name == "EXISTING_VAR" {
+ existingVar = &envVars[i]
+ break
+ }
+ }
+ require.NotNil(t, existingVar)
+ require.Equal(t, "overridden_value", existingVar.Value)
+
+ // New var should be added
+ var newVar *corev1.EnvVar
+ for i := range envVars {
+ if envVars[i].Name == "NEW_VAR" {
+ newVar = &envVars[i]
+ break
+ }
+ }
+ require.NotNil(t, newVar)
+ require.Equal(t, "new_value", newVar.Value)
+ },
+ },
+ {
+ name: "applies resources from deployment config",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "manager"},
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ Resources: &corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("100m"),
+ corev1.ResourceMemory: resource.MustParse("128Mi"),
+ },
+ Limits: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("200m"),
+ corev1.ResourceMemory: resource.MustParse("256Mi"),
+ },
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+ resources := dep.Spec.Template.Spec.Containers[0].Resources
+
+ require.Equal(t, resource.MustParse("100m"), *resources.Requests.Cpu())
+ require.Equal(t, resource.MustParse("128Mi"), *resources.Requests.Memory())
+ require.Equal(t, resource.MustParse("200m"), *resources.Limits.Cpu())
+ require.Equal(t, resource.MustParse("256Mi"), *resources.Limits.Memory())
+ },
+ },
+ {
+ name: "applies tolerations from deployment config",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "manager"},
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ Tolerations: []corev1.Toleration{
+ {
+ Key: "node.kubernetes.io/disk-type",
+ Operator: corev1.TolerationOpEqual,
+ Value: "ssd",
+ Effect: corev1.TaintEffectNoSchedule,
+ },
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+ tolerations := dep.Spec.Template.Spec.Tolerations
+
+ require.Len(t, tolerations, 1)
+ require.Equal(t, "node.kubernetes.io/disk-type", tolerations[0].Key)
+ require.Equal(t, corev1.TolerationOpEqual, tolerations[0].Operator)
+ require.Equal(t, "ssd", tolerations[0].Value)
+ require.Equal(t, corev1.TaintEffectNoSchedule, tolerations[0].Effect)
+ },
+ },
+ {
+ name: "applies node selector from deployment config",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "manager"},
+ },
+ NodeSelector: map[string]string{
+ "existing-key": "existing-value",
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ NodeSelector: map[string]string{
+ "disk-type": "ssd",
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+
+ // Node selector should be replaced, not merged
+ require.Equal(t, map[string]string{"disk-type": "ssd"}, dep.Spec.Template.Spec.NodeSelector)
+ },
+ },
+ {
+ name: "applies affinity from deployment config",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "manager"},
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ Affinity: &corev1.Affinity{
+ NodeAffinity: &corev1.NodeAffinity{
+ RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
+ NodeSelectorTerms: []corev1.NodeSelectorTerm{
+ {
+ MatchExpressions: []corev1.NodeSelectorRequirement{
+ {
+ Key: "kubernetes.io/arch",
+ Operator: corev1.NodeSelectorOpIn,
+ Values: []string{"amd64", "arm64"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+
+ require.NotNil(t, dep.Spec.Template.Spec.Affinity)
+ require.NotNil(t, dep.Spec.Template.Spec.Affinity.NodeAffinity)
+ require.NotNil(t, dep.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution)
+ require.Len(t, dep.Spec.Template.Spec.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, 1)
+ },
+ },
+ {
+ name: "applies annotations from deployment config",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithAnnotations(map[string]string{
+ "csv-annotation": "csv-value",
+ }).
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ ObjectMeta: metav1.ObjectMeta{
+ Annotations: map[string]string{
+ "existing-pod-annotation": "existing-pod-value",
+ },
+ },
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "manager"},
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ Annotations: map[string]string{
+ "config-annotation": "config-value",
+ "existing-pod-annotation": "should-not-override",
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+
+ // Deployment annotations should include config annotations
+ // (CSV annotations are only merged into pod template by the generator)
+ require.Contains(t, dep.Annotations, "config-annotation")
+ require.Equal(t, "config-value", dep.Annotations["config-annotation"])
+
+ // Pod template annotations should include CSV annotations (merged by generator)
+ // and existing pod annotations should take precedence over config
+ require.Contains(t, dep.Spec.Template.Annotations, "csv-annotation")
+ require.Equal(t, "csv-value", dep.Spec.Template.Annotations["csv-annotation"])
+ require.Contains(t, dep.Spec.Template.Annotations, "existing-pod-annotation")
+ require.Equal(t, "existing-pod-value", dep.Spec.Template.Annotations["existing-pod-annotation"])
+ require.Contains(t, dep.Spec.Template.Annotations, "config-annotation")
+ require.Equal(t, "config-value", dep.Spec.Template.Annotations["config-annotation"])
+ },
+ },
+ {
+ name: "applies volumes and volume mounts from deployment config",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "manager"},
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ Volumes: []corev1.Volume{
+ {
+ Name: "config-volume",
+ VolumeSource: corev1.VolumeSource{
+ ConfigMap: &corev1.ConfigMapVolumeSource{
+ LocalObjectReference: corev1.LocalObjectReference{Name: "my-config"},
+ },
+ },
+ },
+ },
+ VolumeMounts: []corev1.VolumeMount{
+ {
+ Name: "config-volume",
+ MountPath: "/etc/config",
+ },
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+
+ // Check volume was added
+ require.Len(t, dep.Spec.Template.Spec.Volumes, 1)
+ require.Equal(t, "config-volume", dep.Spec.Template.Spec.Volumes[0].Name)
+
+ // Check volume mount was added to container
+ require.Len(t, dep.Spec.Template.Spec.Containers[0].VolumeMounts, 1)
+ require.Equal(t, "config-volume", dep.Spec.Template.Spec.Containers[0].VolumeMounts[0].Name)
+ require.Equal(t, "/etc/config", dep.Spec.Template.Spec.Containers[0].VolumeMounts[0].MountPath)
+ },
+ },
+ {
+ name: "applies envFrom from deployment config",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "manager"},
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ EnvFrom: []corev1.EnvFromSource{
+ {
+ ConfigMapRef: &corev1.ConfigMapEnvSource{
+ LocalObjectReference: corev1.LocalObjectReference{Name: "env-config"},
+ },
+ },
+ {
+ SecretRef: &corev1.SecretEnvSource{
+ LocalObjectReference: corev1.LocalObjectReference{Name: "env-secret"},
+ },
+ },
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+
+ envFrom := dep.Spec.Template.Spec.Containers[0].EnvFrom
+ require.Len(t, envFrom, 2)
+
+ // Check ConfigMap ref
+ require.NotNil(t, envFrom[0].ConfigMapRef)
+ require.Equal(t, "env-config", envFrom[0].ConfigMapRef.Name)
+
+ // Check Secret ref
+ require.NotNil(t, envFrom[1].SecretRef)
+ require.Equal(t, "env-secret", envFrom[1].SecretRef.Name)
+ },
+ },
+ {
+ name: "applies all config fields together",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "manager"},
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ Env: []corev1.EnvVar{
+ {Name: "ENV_VAR", Value: "value"},
+ },
+ Resources: &corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("100m"),
+ },
+ },
+ Tolerations: []corev1.Toleration{
+ {Key: "key1", Operator: corev1.TolerationOpEqual, Value: "value1"},
+ },
+ NodeSelector: map[string]string{
+ "disk": "ssd",
+ },
+ Annotations: map[string]string{
+ "annotation-key": "annotation-value",
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+
+ // Verify env was applied
+ require.Len(t, dep.Spec.Template.Spec.Containers[0].Env, 1)
+ require.Equal(t, "ENV_VAR", dep.Spec.Template.Spec.Containers[0].Env[0].Name)
+
+ // Verify resources were applied
+ require.NotNil(t, dep.Spec.Template.Spec.Containers[0].Resources.Requests)
+
+ // Verify tolerations were applied
+ require.Len(t, dep.Spec.Template.Spec.Tolerations, 1)
+
+ // Verify node selector was applied
+ require.Equal(t, map[string]string{"disk": "ssd"}, dep.Spec.Template.Spec.NodeSelector)
+
+ // Verify annotations were applied
+ require.Contains(t, dep.Annotations, "annotation-key")
+ require.Contains(t, dep.Spec.Template.Annotations, "annotation-key")
+ },
+ },
+ {
+ name: "applies config to multiple containers",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {Name: "container1"},
+ {Name: "container2"},
+ {Name: "container3"},
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: &config.DeploymentConfig{
+ Env: []corev1.EnvVar{
+ {Name: "SHARED_VAR", Value: "shared_value"},
+ },
+ Resources: &corev1.ResourceRequirements{
+ Requests: corev1.ResourceList{
+ corev1.ResourceCPU: resource.MustParse("100m"),
+ },
+ },
+ },
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+
+ // All containers should have the env var
+ for i := range dep.Spec.Template.Spec.Containers {
+ container := dep.Spec.Template.Spec.Containers[i]
+ require.Len(t, container.Env, 1)
+ require.Equal(t, "SHARED_VAR", container.Env[0].Name)
+ require.Equal(t, "shared_value", container.Env[0].Value)
+
+ // All containers should have the resources
+ require.NotNil(t, container.Resources.Requests)
+ require.Equal(t, resource.MustParse("100m"), *container.Resources.Requests.Cpu())
+ }
+ },
+ },
+ {
+ name: "nil deployment config does nothing",
+ bundle: &bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().
+ WithStrategyDeploymentSpecs(
+ v1alpha1.StrategyDeploymentSpec{
+ Name: "test-deployment",
+ Spec: appsv1.DeploymentSpec{
+ Template: corev1.PodTemplateSpec{
+ Spec: corev1.PodSpec{
+ Containers: []corev1.Container{
+ {
+ Name: "manager",
+ Env: []corev1.EnvVar{
+ {Name: "EXISTING_VAR", Value: "existing_value"},
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ ).Build(),
+ },
+ opts: render.Options{
+ InstallNamespace: "test-ns",
+ TargetNamespaces: []string{"test-ns"},
+ DeploymentConfig: nil,
+ },
+ verify: func(t *testing.T, objs []client.Object) {
+ require.Len(t, objs, 1)
+ dep := objs[0].(*appsv1.Deployment)
+
+ // Should only have the existing env var
+ require.Len(t, dep.Spec.Template.Spec.Containers[0].Env, 1)
+ require.Equal(t, "EXISTING_VAR", dep.Spec.Template.Spec.Containers[0].Env[0].Name)
+ require.Equal(t, "existing_value", dep.Spec.Template.Spec.Containers[0].Env[0].Value)
+ },
+ },
+ } {
+ t.Run(tc.name, func(t *testing.T) {
+ objs, err := generators.BundleCSVDeploymentGenerator(tc.bundle, tc.opts)
+ require.NoError(t, err)
+ tc.verify(t, objs)
+ })
+ }
+}
diff --git a/internal/operator-controller/rukpak/render/render.go b/internal/operator-controller/rukpak/render/render.go
index f7e419c78..2de43f0f7 100644
--- a/internal/operator-controller/rukpak/render/render.go
+++ b/internal/operator-controller/rukpak/render/render.go
@@ -10,6 +10,7 @@ import (
"github.com/operator-framework/api/pkg/operators/v1alpha1"
+ "github.com/operator-framework/operator-controller/internal/operator-controller/config"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/util"
hashutil "github.com/operator-framework/operator-controller/internal/shared/util/hash"
@@ -62,6 +63,9 @@ type Options struct {
TargetNamespaces []string
UniqueNameGenerator UniqueNameGenerator
CertificateProvider CertificateProvider
+ // DeploymentConfig contains optional customizations to apply to CSV deployments.
+ // If nil, no customizations are applied.
+ DeploymentConfig *config.DeploymentConfig
}
func (o *Options) apply(opts ...Option) *Options {
@@ -109,6 +113,14 @@ func WithCertificateProvider(provider CertificateProvider) Option {
}
}
+// WithDeploymentConfig sets the deployment configuration to apply to CSV deployments.
+// If deploymentConfig is nil, no customizations are applied.
+func WithDeploymentConfig(deploymentConfig *config.DeploymentConfig) Option {
+ return func(o *Options) {
+ o.DeploymentConfig = deploymentConfig
+ }
+}
+
type BundleRenderer struct {
BundleValidator BundleValidator
ResourceGenerators []ResourceGenerator
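A hedged sketch of the functional-option path (not part of the patch): WithDeploymentConfig stores the configuration on render.Options, where every ResourceGenerator can read it; the renderer, bundle, and values below are placeholders, matching the flow exercised by render_test.go:

// Example only: passing a DeploymentConfig through the renderer's options.
package example

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/operator-framework/operator-controller/internal/operator-controller/config"
	"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle"
	"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render"
)

func renderWithOverrides(renderer render.BundleRenderer, rv1 bundle.RegistryV1) error {
	// Passing nil to WithDeploymentConfig leaves the default behavior
	// (no customizations), as exercised by the tests below.
	_, err := renderer.Render(
		rv1,
		"operators", // install namespace (placeholder)
		render.WithDeploymentConfig(&config.DeploymentConfig{
			Env: []corev1.EnvVar{{Name: "LOG_LEVEL", Value: "debug"}},
		}),
	)
	return err
}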
diff --git a/internal/operator-controller/rukpak/render/render_test.go b/internal/operator-controller/rukpak/render/render_test.go
index ca1459889..452f9f3fd 100644
--- a/internal/operator-controller/rukpak/render/render_test.go
+++ b/internal/operator-controller/rukpak/render/render_test.go
@@ -13,6 +13,7 @@ import (
"github.com/operator-framework/api/pkg/operators/v1alpha1"
+ "github.com/operator-framework/operator-controller/internal/operator-controller/config"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/bundle"
"github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/render"
. "github.com/operator-framework/operator-controller/internal/operator-controller/rukpak/util/testing"
@@ -382,3 +383,79 @@ func Test_BundleValidatorCallsAllValidationFnsInOrder(t *testing.T) {
require.NoError(t, val.Validate(nil))
require.Equal(t, "hi", actual)
}
+
+func Test_WithDeploymentConfig(t *testing.T) {
+ t.Run("sets deployment config when provided", func(t *testing.T) {
+ expectedConfig := &config.DeploymentConfig{
+ Env: []corev1.EnvVar{
+ {Name: "TEST_ENV", Value: "test-value"},
+ },
+ }
+
+ var receivedConfig *config.DeploymentConfig
+ renderer := render.BundleRenderer{
+ ResourceGenerators: []render.ResourceGenerator{
+ func(rv1 *bundle.RegistryV1, opts render.Options) ([]client.Object, error) {
+ receivedConfig = opts.DeploymentConfig
+ return nil, nil
+ },
+ },
+ }
+
+ _, err := renderer.Render(
+ bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().WithInstallModeSupportFor(v1alpha1.InstallModeTypeAllNamespaces).Build(),
+ },
+ "test-namespace",
+ render.WithDeploymentConfig(expectedConfig),
+ )
+
+ require.NoError(t, err)
+ require.Equal(t, expectedConfig, receivedConfig)
+ })
+
+ t.Run("deployment config is nil when not provided", func(t *testing.T) {
+ var receivedConfig *config.DeploymentConfig
+ renderer := render.BundleRenderer{
+ ResourceGenerators: []render.ResourceGenerator{
+ func(rv1 *bundle.RegistryV1, opts render.Options) ([]client.Object, error) {
+ receivedConfig = opts.DeploymentConfig
+ return nil, nil
+ },
+ },
+ }
+
+ _, err := renderer.Render(
+ bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().WithInstallModeSupportFor(v1alpha1.InstallModeTypeAllNamespaces).Build(),
+ },
+ "test-namespace",
+ )
+
+ require.NoError(t, err)
+ require.Nil(t, receivedConfig)
+ })
+
+ t.Run("deployment config is nil when explicitly set to nil", func(t *testing.T) {
+ var receivedConfig *config.DeploymentConfig
+ renderer := render.BundleRenderer{
+ ResourceGenerators: []render.ResourceGenerator{
+ func(rv1 *bundle.RegistryV1, opts render.Options) ([]client.Object, error) {
+ receivedConfig = opts.DeploymentConfig
+ return nil, nil
+ },
+ },
+ }
+
+ _, err := renderer.Render(
+ bundle.RegistryV1{
+ CSV: clusterserviceversion.Builder().WithInstallModeSupportFor(v1alpha1.InstallModeTypeAllNamespaces).Build(),
+ },
+ "test-namespace",
+ render.WithDeploymentConfig(nil),
+ )
+
+ require.NoError(t, err)
+ require.Nil(t, receivedConfig)
+ })
+}
diff --git a/openshift/tests-extension/go.mod b/openshift/tests-extension/go.mod
index bb0a56216..890f27847 100644
--- a/openshift/tests-extension/go.mod
+++ b/openshift/tests-extension/go.mod
@@ -41,8 +41,8 @@ require (
github.com/fxamacker/cbor/v2 v2.9.0 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-openapi/jsonpointer v0.22.3 // indirect
- github.com/go-openapi/jsonreference v0.21.3 // indirect
+ github.com/go-openapi/jsonpointer v0.22.4 // indirect
+ github.com/go-openapi/jsonreference v0.21.4 // indirect
github.com/go-openapi/swag v0.25.4 // indirect
github.com/go-openapi/swag/cmdutils v0.25.4 // indirect
github.com/go-openapi/swag/conv v0.25.4 // indirect
@@ -85,12 +85,12 @@ require (
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 // indirect
- go.opentelemetry.io/otel v1.38.0 // indirect
+ go.opentelemetry.io/otel v1.39.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 // indirect
- go.opentelemetry.io/otel/metric v1.38.0 // indirect
+ go.opentelemetry.io/otel/metric v1.39.0 // indirect
go.opentelemetry.io/otel/sdk v1.38.0 // indirect
- go.opentelemetry.io/otel/trace v1.38.0 // indirect
+ go.opentelemetry.io/otel/trace v1.39.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
go.yaml.in/yaml/v2 v2.4.3 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
diff --git a/openshift/tests-extension/go.sum b/openshift/tests-extension/go.sum
index 36a97db99..1db7d9fe6 100644
--- a/openshift/tests-extension/go.sum
+++ b/openshift/tests-extension/go.sum
@@ -36,10 +36,10 @@ github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.3.0 h1:XGdV8XW8zdwFiwOA2Dryh1gj2KRQyOOoNmBy4EplIcQ=
github.com/go-logr/zapr v1.3.0/go.mod h1:YKepepNBd1u/oyhd/yQmtjVXmm9uML4IXUgMOwR8/Gg=
-github.com/go-openapi/jsonpointer v0.22.3 h1:dKMwfV4fmt6Ah90zloTbUKWMD+0he+12XYAsPotrkn8=
-github.com/go-openapi/jsonpointer v0.22.3/go.mod h1:0lBbqeRsQ5lIanv3LHZBrmRGHLHcQoOXQnf88fHlGWo=
-github.com/go-openapi/jsonreference v0.21.3 h1:96Dn+MRPa0nYAR8DR1E03SblB5FJvh7W6krPI0Z7qMc=
-github.com/go-openapi/jsonreference v0.21.3/go.mod h1:RqkUP0MrLf37HqxZxrIAtTWW4ZJIK1VzduhXYBEeGc4=
+github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
+github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
+github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
+github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
github.com/go-openapi/swag v0.25.4 h1:OyUPUFYDPDBMkqyxOTkqDYFnrhuhi9NR6QVUvIochMU=
github.com/go-openapi/swag v0.25.4/go.mod h1:zNfJ9WZABGHCFg2RnY0S4IOkAcVTzJ6z2Bi+Q4i6qFQ=
github.com/go-openapi/swag/cmdutils v0.25.4 h1:8rYhB5n6WawR192/BfUu2iVlxqVR9aRgGJP6WaBoW+4=
@@ -207,20 +207,20 @@ go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0 h1:RbKq8BG0FI8OiXhBfcRtqqHcZcka+gU3cskNuf05R18=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.63.0/go.mod h1:h06DGIukJOevXaj/xrNjhi/2098RZzcLTbc0jDAUbsg=
-go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8=
-go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM=
+go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
+go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0 h1:lwI4Dc5leUqENgGuQImwLo4WnuXFPetmPpkLi2IrX54=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.38.0/go.mod h1:Kz/oCE7z5wuyhPxsXDuaPteSWqjSBD5YaSdbxZYGbGk=
-go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA=
-go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI=
+go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
+go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E=
go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg=
go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM=
go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA=
-go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE=
-go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs=
+go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
+go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4=
go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
diff --git a/openshift/tests-extension/pkg/bindata/operator/operator.go b/openshift/tests-extension/pkg/bindata/operator/operator.go
index 72f9baae4..65a003fbd 100644
--- a/openshift/tests-extension/pkg/bindata/operator/operator.go
+++ b/openshift/tests-extension/pkg/bindata/operator/operator.go
@@ -102,7 +102,7 @@ func dockerfile() (*asset, error) {
return a, nil
}
-var _manifestsRegistryClusterserviceversionYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x4b\x6f\xe3\x36\x10\xbe\xfb\x57\x0c\x74\xae\x14\x3b\x9b\xec\x06\x3c\x35\xbb\x1b\x04\x05\xf2\x42\x92\xf6\xd0\xc5\xa2\x18\x53\x13\x9b\x35\x45\xb2\x24\xe5\xac\x9a\xe6\xbf\x17\x94\x64\x5b\xb2\x68\xc7\x8b\xed\xa5\xbc\x58\x22\xe7\xf1\xcd\x93\x23\xa3\x11\xbf\x91\x75\x42\x2b\x06\xda\x90\x45\xaf\xad\xcb\xb8\xb6\xa4\xc3\x4f\x71\xb4\x9c\xa0\x34\x73\x9c\x8c\x16\x42\xe5\x0c\x3e\xc9\xd2\x79\xb2\x0f\x64\x97\x82\x53\xcb\x3a\x2a\xc8\x63\x8e\x1e\xd9\x08\x00\x95\xd2\x1e\xbd\xd0\xca\x85\x57\x00\x94\x45\x4a\xdf\xb0\x30\x92\x1c\x83\x7f\xd2\x7a\x13\xe0\x4b\xfb\x0b\xf0\xb2\x7e\x02\x48\x36\x80\x12\x06\x89\xe6\xa9\x36\x39\x2d\x53\x3a\xa6\x6c\x85\xef\xc9\x62\x41\xcf\xda\x2e\x32\xa1\xb3\x37\x29\xd6\x16\x24\x3f\x75\xf5\x04\x73\x82\x86\x7b\x9a\x09\xe7\x6d\xd5\x3f\x5d\x19\x94\xb0\x1e\x3a\x80\x44\xe2\x94\xa4\x1b\xec\xd7\xc8\x4d\xb6\x28\xa7\x64\x15\x79\x72\x41\x73\x81\x0a\x67\x94\xa7\xd3\x2a\x68\x5a\x94\xce\xeb\x42\xfc\x4d\x3d\x55\x3b\x58\x15\x16\x14\x98\x5e\x5e\xe0\xf1\xe2\xe1\x31\xfd\xf8\xeb\xcd\xe7\xab\x0b\x78\x7d\x4d\x7a\xbc\xaf\x7d\x51\xc9\x4e\xb6\xd4\xd5\x01\xe8\x72\xf7\x78\x13\x67\x88\x27\x0c\x54\x29\xe5\x7a\xfb\xb5\x7d\xfa\x5a\xff\x72\x34\x38\x15\x52\x78\x11\xe2\xf8\x11\x9d\xe0\xf0\x8b\x72\x1e\x5b\x0e\x6e\x09\x3d\xe5\xe7\x9e\x41\x72\x3c\x3e\x3e\x4d\xc7\x67\xe9\xf8\xfd\xe3\xe4\x03\x3b\x79\xc7\x4e\x26\xbf\x37\xba\x37\x49\x16\x0d\xd6\xb4\x14\x32\x27\xbb\x49\xc6\xd4\xe5\x8b\x74\x39\xc9\xde\x9d\x64\x93\x43\x04\x18\xab\xff\x24\xee\xff\x90\x58\xe9\xd2\x33\x98\xe9\xda\xb3\xad\xdc\x3a\x1f\x4e\x46\x00\xc1\x51\x0c\x06\x6e\xca\x96\xe3\x6c\x5c\x2b\x0a\x04\xce\x20\x27\x06\x46\x22\xa7\xb9\x0e\xfc\xa3\xe0\xa6\x3a\xcb\x8d\x70\x4d\x0d\xe4\xf4\x24\x94\x68\xf2\x1d\x5e\x82\xcb\x78\x1d\x68\x4b\x4e\x97\xb6\x7b\xde\x1e\xe7\xe4\xb8\x15\xa6\xdd\xd9\x46\xd0\x3d\xce\xe0\xf1\xf6\xf3\x6d\x16\x78\x84\x33\x12\xab\x9b\x38\xea\x11\x80\xe0\x5a\x05\x5c\x29\x4c\xd1\xd1\xfb\x93\xba\x1a\x21\x69\x7c\x5e\x50\x2e\xd0\x57\x86\xda\x1d\xd1\x44\xad\x29\xcf\x95\x45\x61\xe5\x64\xa4\xae\x0a\x52\xde\xad\xb6\x52\xa8\x33\x9e\x75\x72\x65\x5f\xa2\x33\x58\xa7\xf9\x5e\x8e\x1d\xfe\xef\xf0\x70\xad\xbc\xd5\x32\x35\x12\x15\xb1\xd5\xab\x24\x9b\x36\xda\xec\x9a\x76\x87\xac\x74\x0f\x4b\xd7\xe8\xb0\x2c\x19\x29\x38\x3a\x06\x93\xce\xae\x23\x49\xdc\x6b\xcb\x7a\x45\x56\xa0\xe7\xf3\xab\xba\x0d\xb0\xad\x42\x3e\x1c\x33\x80\xf3\x16\x3d\xcd\xaa\x36\x2b\x56\xcb\x53\x61\x24\x7a\xda\xd2\xd9\x69\xb0\xdd\x35\x68\xb6\xdd\x15\x3c\xce\xbd\xdc\xf2\x7c\x4e\x4f\x58\x4a\x5f\x7b\x07\x85\x0a\xb5\x36\x04\x17\x96\x8c\x9a\xf8\x7d\x46\x0e\x3d\x1d\xd6\x52\xcb\xb2\xa0\x81\xe8\xb4\x8d\x64\x93\xff\x2e\xa6\xf8\x49\xcc\xae\xd1\x0c\x31\xad\x92\x60\xee\xbd\xc9\xd3\x46\x40\x84\xa8\xb5\xfd\x5a\xe7\xc4\x60\xfc\xe1\xf4\x34\x12\xbf\xda\x27\x11\x6c\x68\x67\x51\x67\x14\x05\x86\x8b\xf1\x4b\x72\xd4\xe2\x3e\xaa\x41\x64\x6e\x9e\x7c\x1d\x90\x8b\x02\x67\xd4\xe9\xcf\x9f\x6e\x6f\x1e\xef\x6f\xaf\xae\x2e\xee\x07\xad\x3d\x2c\xa3\xad\x8f\x28\x0d\x70\xd6\x50\xef\xb4\xf5\x0c\xce\xc6\x67\x93\x01\x5d\xe3\xe7\x6b\x5d\xaa\x5d\x52\xf6\x3b\x3c\xac\x22\x70\xdf\xa1\x9f\x33\x38\xda\x47\x67\x09\xf3\x5b\x25\x2b\x06\xde\x96\x34\x20\x91\x62\x49\x8a\x9c\xbb\xb3\x7a\x4a\x31\x2c\xc1\x69\x97\xe4\x63\x47\x00\xa6\xd1\x1f\x84\xc4\xcf\x77\xfb\x20\x74\x3b\xe1\x05\xca\xcf\x24\xb1\x7a\x20\xae\x55\x1e\xca\x7c\x3b\xf4\xb5\x18\xb2\x42\xe7\x6b\x9a\xe3\xf1\x80\xa6\xf1\x57\xbc\x5e\x1a\x1f\x88\x1f\xb6\x32\x48\xa9\xfe\x23\x33\x0f\xb0\x72\x32\xb4\x72\x75\x75\x45\x93\x46\x8a\x42\xc4\xd3\x09\x80\x9b\x92\xc1\xe9\x78\x5c\xc4\x53\x89\x0a\x6d\x2b\x06\x93\xe3\xb3\x6b
\x11\xa1\xb0\xf4\x57\x49\x6e\xaf\xec\xc9\x1b\xa2\xdf\x9f\x44\x24\x3b\xe2\xa5\x15\xbe\xfa\xa4\x95\xa7\x6f\x51\xe7\xa3\x94\xfa\xf9\xce\x8a\xa5\x90\x34\xa3\x0b\xc7\x51\x62\x73\x41\x3f\xa1\x74\xb1\xa4\xeb\x4d\x43\x51\x4c\xb9\xd5\xd1\x4e\x15\xea\xee\xfc\xea\x6a\x08\xd3\xa3\xf5\xa5\xf9\xa1\xdc\xa9\x65\x50\xfe\xfd\xd9\xf3\x84\x42\x96\x96\x1e\xe7\x96\x5c\x18\x75\x18\xbc\x1b\xe6\xc5\xdb\xb9\xf3\xa6\xab\x6d\xa9\xce\xdd\x8d\x56\xf7\x5a\xfb\x68\xab\x68\xa7\xaa\x73\xce\x43\xe3\x69\x06\x9e\xb6\x6d\x6f\x51\x7a\xb2\x85\x50\x75\x98\x2e\x2d\x72\xba\xdb\x81\xcd\x04\x3a\xe7\xba\x77\x64\x0a\xb6\x94\xdd\xb8\xa5\x61\xa0\xbb\xb4\xba\x34\xbd\x60\xa6\xab\x09\xaa\x05\x1f\xab\x8b\xb4\xbd\x97\x0a\x34\xdd\xce\xb8\x24\x3b\xdd\xa2\x9b\x91\xef\xbd\x4b\xe1\xfa\x1b\xcf\x61\xb2\xe8\x8b\xae\x67\xea\xde\x56\x69\xf2\xed\x2d\x33\xe0\xcb\x49\x52\x87\x68\xa7\x79\x5c\x6b\x9b\xb7\x4e\xcc\x16\x67\x61\x40\x78\xdb\x5e\x49\xe8\xe8\xff\x67\xeb\x21\xa1\xa4\x65\x98\x7d\xf7\x9b\x16\xc1\xd9\x07\x15\x4b\xe1\x83\xa7\xd3\xcd\x58\xb8\x19\xc6\x37\x23\x7b\x98\x5c\x5c\x33\xe7\xbb\xd2\x84\x8a\xa6\xbc\x53\x46\xcd\x8c\x7f\xfb\xac\x6e\x56\x5f\x2f\x6f\x90\x3e\x08\x35\x93\xb4\x93\x7a\xd3\xfc\x1a\xf2\xeb\x52\x7a\x71\xa8\xec\x73\x29\xd7\xa4\xc1\xa5\x0b\xaa\x9e\xb5\xcd\x5b\xf8\xb1\xd1\x5f\x0a\xb5\x68\x8f\xf7\x7d\x1f\x94\x56\x36\x93\x9e\x63\x47\x47\xc3\x4f\xb8\x5c\x17\x28\xd4\x28\x0c\xea\xa2\x3b\xce\xa5\x40\x05\x0a\xc9\xa0\xd2\xa5\xfd\xb9\x7e\xce\xb8\x6e\xee\x93\x46\xdd\xf5\x9a\x01\x02\xf2\x5a\x84\xaf\xfb\x19\x83\xfa\x3f\x84\x11\x80\xb1\x7a\x29\xc2\x17\x6a\x87\xef\xae\xdd\x5b\x71\x6d\x41\x0c\xfa\x36\xa8\x96\xab\xbf\x5b\x9a\x2f\xcd\x7f\x03\x00\x00\xff\xff\x9e\xfb\x1b\xfa\x81\x11\x00\x00")
+var _manifestsRegistryClusterserviceversionYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd4\x58\x4b\x6f\xdb\xb8\x13\xbf\xfb\x53\x0c\x74\xfe\x4b\xb1\xd3\xa4\x0d\x78\xfa\xa7\x6d\x10\x2c\x90\x17\x92\xec\x1e\xb6\x28\x16\x63\x6a\x62\x73\x4d\x91\x5c\x92\x72\xaa\x66\xf3\xdd\x17\x94\x64\x5b\xb2\x68\xc7\x45\xf7\xb2\xbc\x58\x22\xe7\xf1\x9b\x27\x47\x46\x23\x7e\x23\xeb\x84\x56\x0c\xb4\x21\x8b\x5e\x5b\x97\x71\x6d\x49\x87\x9f\xe2\x68\x39\x41\x69\xe6\x38\x19\x2d\x84\xca\x19\x7c\x92\xa5\xf3\x64\x1f\xc8\x2e\x05\xa7\x96\x75\x54\x90\xc7\x1c\x3d\xb2\x11\x00\x2a\xa5\x3d\x7a\xa1\x95\x0b\xaf\x00\x28\x8b\x94\xbe\x61\x61\x24\x39\x06\x7f\xa7\xf5\x26\xc0\x97\xf6\x17\xe0\x65\xfd\x04\x90\x6c\x00\x25\x0c\x12\xcd\x53\x6d\x72\x5a\xa6\x74\x4c\xd9\x0a\xdf\x93\xc5\x82\x9e\xb5\x5d\x64\x42\x67\x6f\x52\xac\x2d\x48\xfe\xd7\xd5\x13\xcc\x09\x1a\xee\x69\x26\x9c\xb7\x55\xff\x74\x65\x50\xc2\x7a\xe8\x00\x12\x89\x53\x92\x6e\xb0\x5f\x23\x37\xd9\xa2\x9c\x92\x55\xe4\xc9\x05\xcd\x05\x2a\x9c\x51\x9e\x4e\xab\xa0\x69\x51\x3a\xaf\x0b\xf1\x9d\x7a\xaa\x76\xb0\x2a\x2c\x28\x30\xbd\xbc\xc0\xe3\xc5\xc3\x63\xfa\xf1\xd7\x9b\xcf\x57\x17\xf0\xfa\x9a\xf4\x78\x5f\xfb\xa2\x92\x9d\x6c\xa9\xab\x03\xd0\xe5\xee\xf1\x26\xce\x10\x4f\x18\xa8\x52\xca\xf5\xf6\x6b\xfb\xf4\xb5\xfe\xe5\x68\x70\x2a\xa4\xf0\x22\xc4\xf1\x23\x3a\xc1\xe1\x17\xe5\x3c\xb6\x1c\xdc\x12\x7a\xca\xcf\x3d\x83\xe4\x78\x7c\x7c\x9a\x8e\xcf\xd2\xf1\xfb\xc7\xc9\x07\x76\xf2\x8e\x9d\x4c\x7e\x6f\x74\x6f\x92\x2c\x1a\xac\x69\x29\x64\x4e\x76\x93\x8c\xa9\xcb\x17\xe9\x72\x92\xbd\x3b\xc9\x26\x87\x08\x30\x56\xff\x49\xdc\xff\x21\xb1\xd2\xa5\x67\x30\xd3\xb5\x67\x5b\xb9\x75\x3e\x9c\x8c\x00\x82\xa3\x18\x0c\xdc\x94\x2d\xc7\xd9\xb8\x56\x14\x08\x9c\x41\x4e\x0c\x8c\x44\x4e\x73\x1d\xf8\x47\xc1\x4d\x75\x96\x1b\xe1\x9a\x1a\xc8\xe9\x49\x28\xd1\xe4\x3b\xbc\x04\x97\xf1\x3a\xd0\x96\x9c\x2e\x6d\xf7\xbc\x3d\xce\xc9\x71\x2b\x4c\xbb\xb3\x8d\xa0\x7b\x9c\xc1\xe3\xed\xe7\xdb\x2c\xf0\x08\x67\x24\x56\x37\x71\xd4\x23\x00\xc1\xb5\x0a\xb8\x52\x98\xa2\xa3\xf7\x27\x75\x35\x42\xd2\xf8\xbc\xa0\x5c\xa0\xaf\x0c\xb5\x3b\xa2\x89\x5a\x53\x9e\x2b\x8b\xc2\xca\xc9\x48\x5d\x15\xa4\xbc\x5b\x6d\xa5\x50\x67\x3c\xeb\xe4\xca\xbe\x44\x67\xb0\x4e\xf3\xbd\x1c\x3b\xfc\xdf\xe1\xe1\x5a\x79\xab\x65\x6a\x24\x2a\x62\xab\x57\x49\x36\x6d\xb4\xd9\x35\xed\x0e\x59\xe9\x1e\x96\xae\xd1\x61\x59\x32\x52\x70\x74\x0c\x26\x9d\x5d\x47\x92\xb8\xd7\x96\xf5\x8a\xac\x40\xcf\xe7\x57\x75\x1b\x60\x5b\x85\x7c\x38\x66\x00\xe7\x2d\x7a\x9a\x55\x6d\x56\xac\x96\xa7\xc2\x48\xf4\xb4\xa5\xb3\xd3\x60\xbb\x6b\xd0\x6c\xbb\x2b\x78\x9c\x7b\xb9\xe5\xf9\x9c\x9e\xb0\x94\xbe\xf6\x0e\x0a\x15\x6a\x6d\x08\x2e\x2c\x19\x35\xf1\xc7\x8c\x1c\x7a\x3a\xac\xa5\x96\x65\x41\x03\xd1\x69\x1b\xc9\x26\xff\x5d\x4c\xf1\x93\x98\x5d\xa3\x19\x62\x5a\x25\xc1\xdc\x7b\x93\xa7\x8d\x80\x08\x51\x6b\xfb\xb5\xce\x89\xc1\xf8\xc3\xe9\x69\x24\x7e\xb5\x4f\x22\xd8\xd0\xce\xa2\xce\x28\x0a\x0c\x17\xe3\x97\xe4\xa8\xc5\x7d\x54\x83\xc8\xdc\x3c\xf9\x3a\x20\x17\x05\xce\xa8\xd3\x9f\x3f\xdd\xde\x3c\xde\xdf\x5e\x5d\x5d\xdc\x0f\x5a\x7b\x58\x46\x5b\x1f\x51\x1a\xe0\xac\xa1\xde\x69\xeb\x19\x9c\x8d\xcf\x26\x03\xba\xc6\xcf\xd7\xba\x54\xbb\xa4\xec\x77\x78\x58\x45\xe0\xbe\x43\x3f\x67\x70\xb4\x8f\xce\x12\xe6\xb7\x4a\x56\x0c\xbc\x2d\x69\x40\x22\xc5\x92\x14\x39\x77\x67\xf5\x94\x62\x58\x82\xd3\x2e\xc9\xc7\x8e\x00\x4c\xa3\x7f\x4e\x28\xfd\xfc\x7b\x9c\x64\xb7\x1b\x42\xc3\x13\x5e\xa0\xfc\x4c\x12\xab\x07\xe2\x5a\xe5\xa1\xd2\xb7\xa3\x5f\x8b\x21\x2b\x74\xbe\xa6\x39\x1e\x0f\x68\x1a\x97\xc5\x4b\xa6\x71\x83\xf8\x69\x43\x83\x94\xea\xdf\xb2\xf3\x00\x33\x27\x43\x33\x57\xd7\x57\x34\x71\xa4\x28\x44\x3c\xa5\x00\xb8\x29\x19\x9c\x8e\xc7\x45\x3c\x9d\xa8\xd0\xb6\x62
\x30\x39\x3e\xbb\x16\x11\x0a\x4b\x7f\x95\xe4\xf6\xca\x9e\xbc\x21\xfa\xfd\x49\x44\xb2\x23\x5e\x5a\xe1\xab\x4f\x5a\x79\xfa\x16\xf5\x3e\x4a\xa9\x9f\xef\xac\x58\x0a\x49\x33\xba\x70\x1c\x25\x36\x97\xf4\x13\x4a\x37\x4c\xe9\xad\x89\x28\x8a\x29\xb7\x3a\xda\xad\x42\xed\x9d\x5f\x5d\x0d\x61\x7a\xb4\xbe\x34\x3f\x95\x3c\xb5\x0c\xca\x7f\x3c\x7b\x9e\x50\xc8\xd2\xd2\xe3\xdc\x92\x0b\xe3\x0e\x83\x77\xc3\xbc\x78\x3b\x77\xde\x74\xb5\x2d\xd5\xb9\xbb\xd1\xea\x5e\x6b\x1f\x6d\x17\xed\x64\x75\xce\x79\x68\x3e\xcd\xd0\xd3\xb6\xee\x2d\x4a\x4f\xb6\x10\xaa\x0e\xd3\xa5\x45\x4e\x77\x3b\xb0\x99\x40\xe7\x5c\xf7\x9e\x4c\xc1\x96\xb2\x1b\xb7\x34\x0c\x75\x97\x56\x97\xa6\x17\xcc\x74\x35\x45\xb5\xe0\x63\x75\x91\xb6\x77\x53\x81\xa6\xdb\x1d\x97\x64\xa7\x5b\x74\x33\xf2\xbd\x77\x29\x5c\x7f\xe3\x39\x4c\x17\x7d\xd1\xf5\x5c\xdd\xdb\x2a\x4d\xbe\xbd\x65\x06\x7c\x39\x49\xea\x10\xed\x34\x8f\x6b\x6d\xf3\xd6\x89\xd9\xe2\x2c\x0c\x09\x6f\xdb\x2b\x09\x1d\xfd\xf7\x6c\x3d\x24\x94\xb4\x0c\xf3\xef\x7e\xd3\x22\x38\xfb\xa0\x62\x29\x7c\xf0\x84\xba\x19\x0d\x37\x03\xf9\x66\x6c\x0f\xd3\x8b\x6b\x66\x7d\x57\x9a\x50\xd1\x94\x77\xca\xa8\x99\xf3\x6f\x9f\xd5\xcd\xea\x0b\xe6\x0d\xd2\x07\xa1\x66\x92\x76\x52\x6f\x9a\x5f\x43\x7e\x5d\x4a\x2f\x0e\x95\x7d\x2e\xe5\x9a\x34\xb8\x74\x41\xd5\xb3\xb6\x79\x0b\x3f\x36\xfe\x4b\xa1\x16\xed\xf1\xbe\x6f\x84\xd2\xca\x66\xda\x73\xec\xe8\x68\xf8\x19\x97\xeb\x02\x85\x1a\x85\x61\x5d\x74\x47\xba\x14\xa8\x40\x21\x19\x54\xba\xb4\xff\xaf\x9f\x33\xae\x9b\xfb\xa4\x51\x77\xbd\x66\x80\x80\xbc\x16\xe1\xeb\x7e\xc6\xa0\xfe\x1f\x61\x04\x60\xac\x5e\x8a\xf0\x95\xda\xe1\xbb\x6b\xf7\x56\x5c\x5b\x10\x83\xbe\x0d\xaa\xe5\xea\x2f\x97\xe6\x6b\xf3\x9f\x00\x00\x00\xff\xff\x78\xa4\x24\x61\x85\x11\x00\x00")
func manifestsRegistryClusterserviceversionYamlBytes() ([]byte, error) {
return bindataRead(
@@ -117,12 +117,12 @@ func manifestsRegistryClusterserviceversionYaml() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "manifests/registry.clusterserviceversion.yaml", size: 4481, mode: os.FileMode(420), modTime: time.Unix(1770048270, 0)}
+ info := bindataFileInfo{name: "manifests/registry.clusterserviceversion.yaml", size: 4485, mode: os.FileMode(420), modTime: time.Unix(1769784020, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
-var _manifestsScriptConfigmapYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xce\xbd\x4e\xc4\x30\x10\x04\xe0\xde\x4f\x31\x1c\xb5\xe3\x0b\x34\x27\x17\x34\xd4\xb4\xf4\x26\x5e\xf0\xea\xf0\x8f\xd6\x4b\xa2\x48\x3c\x3c\x92\x05\x5c\x95\x72\xf7\x1b\x69\x26\x34\x7e\x25\xe9\x5c\x8b\xc7\x3a\x9b\x2b\x97\xe8\xf1\x5c\xcb\x3b\x7f\xbc\x84\x66\x32\x69\x88\x41\x83\x37\x40\x09\x99\x3c\x92\x6a\x8b\xb6\x2f\xc2\x4d\xcd\x1f\x8d\xe7\xd4\x93\xc7\xb7\x01\x80\xfb\x3b\xf7\xc6\xc5\xf5\x34\x2e\x5a\x52\xc5\xe9\xb7\x06\xf3\xf4\x30\x9d\x4f\x03\xf2\x35\xb2\xc0\x36\x38\xcd\xcd\x6d\xdb\x76\x8b\xab\x7c\x11\x9e\xfe\xc1\x75\x0d\xa2\x14\x8f\x03\x42\x21\xee\xc7\xfc\xc9\x2b\x0d\x6d\xbb\xa6\x5a\x1e\x61\xf3\x58\x3d\x75\x92\x95\x04\x97\xf3\x65\x86\xb5\x91\x85\x16\xad\xb2\xdf\x26\xfd\x04\x00\x00\xff\xff\x76\x45\x48\x23\x22\x01\x00\x00")
+var _manifestsScriptConfigmapYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x74\xce\xbf\x6e\x83\x30\x10\xc7\xf1\xdd\x4f\xf1\x2b\x9d\x8d\xa1\x13\xf2\xd0\xa5\x73\xd7\xee\x57\x7c\xc4\x56\x82\xb1\xec\x0b\x24\x52\x1e\x3e\x12\xe4\xcf\xc4\x78\xf7\xf9\x4a\x77\x94\xc2\x1f\xe7\x12\xa6\x68\x31\xb7\xea\x18\xa2\xb3\xf8\x99\xe2\x10\x0e\xbf\x94\xd4\xc8\x42\x8e\x84\xac\x02\x22\x8d\x6c\xe1\x45\x92\xd3\xa5\xcf\x21\x89\x7a\xd2\xba\xac\x8b\xb7\xb8\x29\x00\xf8\xfc\x30\xff\x21\x9a\xe2\xd7\x89\x7b\x3f\xa1\x7a\x9c\x41\x5b\x7f\xd5\x4d\xf5\x06\xc9\x67\xc6\x37\xcc\x4c\xd9\x2c\xcb\x62\x8a\x50\x16\x76\xfb\x41\x66\x72\xd7\x7d\x3e\x85\x99\x37\xbd\x70\xbf\x7d\x06\x3d\x40\xfb\x57\x02\x9d\xd0\x35\x5d\xab\xee\x01\x00\x00\xff\xff\xa2\x23\x09\xd5\xfd\x00\x00\x00")
func manifestsScriptConfigmapYamlBytes() ([]byte, error) {
return bindataRead(
@@ -137,7 +137,7 @@ func manifestsScriptConfigmapYaml() (*asset, error) {
return nil, err
}
- info := bindataFileInfo{name: "manifests/script.configmap.yaml", size: 290, mode: os.FileMode(420), modTime: time.Unix(1770048270, 0)}
+ info := bindataFileInfo{name: "manifests/script.configmap.yaml", size: 253, mode: os.FileMode(420), modTime: time.Unix(1769765371, 0)}
a := &asset{bytes: bytes, info: info}
return a, nil
}
diff --git a/openshift/tests-extension/test/olmv1.go b/openshift/tests-extension/test/olmv1.go
index ba77a1932..5eddb3f91 100644
--- a/openshift/tests-extension/test/olmv1.go
+++ b/openshift/tests-extension/test/olmv1.go
@@ -9,7 +9,6 @@ import (
//nolint:staticcheck // ST1001: dot-imports for readability
. "github.com/onsi/gomega"
- "github.com/openshift/origin/test/extended/util/image"
corev1 "k8s.io/api/core/v1"
apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/api/meta"
@@ -134,7 +133,7 @@ var _ = Describe("[sig-olmv1][OCPFeatureGate:NewOLM] OLMv1 operator installation
// Using the shell image provided by origin as the controller image.
// The image is mirrored into disconnected environments for testing.
- "{{ TEST-CONTROLLER }}": image.ShellImage(),
+ "{{ TEST-CONTROLLER }}": "registry.k8s.io/e2e-test-images/busybox:1.36.1-1",
}
unique, nsName, ccName, opName = helpers.NewCatalogAndClusterBundles(ctx, replacements,
catalogdata.AssetNames, catalogdata.Asset,
diff --git a/openshift/tests-extension/testdata/operator/manifests/registry.clusterserviceversion.yaml b/openshift/tests-extension/testdata/operator/manifests/registry.clusterserviceversion.yaml
index b1090e321..e6f9340cc 100644
--- a/openshift/tests-extension/testdata/operator/manifests/registry.clusterserviceversion.yaml
+++ b/openshift/tests-extension/testdata/operator/manifests/registry.clusterserviceversion.yaml
@@ -69,14 +69,14 @@ spec:
readOnly: true
livenessProbe:
httpGet:
- path: /live
+ path: /healthz
port: 8081
initialDelaySeconds: 15
periodSeconds: 20
name: manager
readinessProbe:
httpGet:
- path: /ready
+ path: /readyz
port: 8081
initialDelaySeconds: 5
periodSeconds: 10
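
The probe changes above move the manager container from the custom `/live` and `/ready` paths to `/healthz` and `/readyz` on port 8081, the endpoints conventionally served by a controller-runtime manager's health probe listener. A minimal sketch of how such a manager typically exposes them is below; controller-runtime usage, the `:8081` bind address, and the `healthz.Ping` checks are illustrative assumptions, not something this diff shows.

```go
package main

import (
	"os"

	ctrl "sigs.k8s.io/controller-runtime"
	"sigs.k8s.io/controller-runtime/pkg/healthz"
)

func main() {
	// The health probe listener serves /healthz and /readyz; :8081 matches
	// the probe port declared in the ClusterServiceVersion above.
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		HealthProbeBindAddress: ":8081",
	})
	if err != nil {
		os.Exit(1)
	}

	// Register the checks that back /healthz and /readyz.
	if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
		os.Exit(1)
	}
	if err := mgr.AddReadyzCheck("readyz", healthz.Ping); err != nil {
		os.Exit(1)
	}

	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		os.Exit(1)
	}
}
```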
diff --git a/openshift/tests-extension/testdata/operator/manifests/script.configmap.yaml b/openshift/tests-extension/testdata/operator/manifests/script.configmap.yaml
index 76cacfc75..29ba9b0db 100644
--- a/openshift/tests-extension/testdata/operator/manifests/script.configmap.yaml
+++ b/openshift/tests-extension/testdata/operator/manifests/script.configmap.yaml
@@ -6,8 +6,7 @@ data:
httpd.sh: |
#!/bin/sh
echo "Version 1.2.0"
- mkdir -p /tmp/www
- echo true > /tmp/www/started
- echo true > /tmp/www/ready
- echo true > /tmp/www/live
- python3 -m http.server 8081 --directory /tmp/www
+ echo true > /var/www/started
+ echo true > /var/www/ready
+ echo true > /var/www/live
+ exec httpd -f -h /var/www -p 8081
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/.cliff.toml b/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/.cliff.toml
index ae70028b7..702629f5d 100644
--- a/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/.cliff.toml
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/.cliff.toml
@@ -78,7 +78,7 @@ body = """
### People who contributed to this release
{% endif %}
{%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %}
- {%- if contributor.username != "dependabot[bot]" %}
+ {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }})
{%- endif %}
{%- endfor %}
@@ -91,7 +91,7 @@ body = """
{%- endif %}
{%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
- {%- if contributor.username != "dependabot[bot]" %}
+ {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
* @{{ contributor.username }} made their first contribution
{%- if contributor.pr_number %}
in [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md b/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md
index aace4fcfb..03c098316 100644
--- a/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md
@@ -4,11 +4,11 @@
| Total Contributors | Total Contributions |
| --- | --- |
-| 12 | 90 |
+| 12 | 95 |
| Username | All Time Contribution Count | All Commits |
| --- | --- | --- |
-| @fredbi | 43 | https://github.com/go-openapi/jsonpointer/commits?author=fredbi |
+| @fredbi | 48 | https://github.com/go-openapi/jsonpointer/commits?author=fredbi |
| @casualjim | 33 | https://github.com/go-openapi/jsonpointer/commits?author=casualjim |
| @magodo | 3 | https://github.com/go-openapi/jsonpointer/commits?author=magodo |
| @youyuanwu | 3 | https://github.com/go-openapi/jsonpointer/commits?author=youyuanwu |
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/README.md b/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/README.md
index 00cbfd741..b61b63fd9 100644
--- a/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -8,8 +8,7 @@
[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url]
-
-[![GoDoc][godoc-badge]][godoc-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge]
+[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge]
---
@@ -104,6 +103,15 @@ using the special trailing character "-" is not implemented.
* [Maintainers documentation](docs/MAINTAINERS.md)
* [Code style](docs/STYLE.md)
+## Cutting a new release
+
+Maintainers can cut a new release by either:
+
+* running [this workflow](https://github.com/go-openapi/jsonpointer/actions/workflows/bump-release.yml)
+* or pushing a semver tag
+ * signed tags are preferred
+ * The tag message is prepended to release notes
+
[test-badge]: https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg
[test-url]: https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml
@@ -114,8 +122,10 @@ using the special trailing character "-" is not implemented.
[codeql-badge]: https://github.com/go-openapi/jsonpointer/actions/workflows/codeql.yml/badge.svg
[codeql-url]: https://github.com/go-openapi/jsonpointer/actions/workflows/codeql.yml
-[release-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer.svg
-[release-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer
+[release-badge]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer.svg
+[release-url]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer
+[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer.svg
+[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer
[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/jsonpointer
[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/jsonpointer
@@ -126,8 +136,9 @@ using the special trailing character "-" is not implemented.
[doc-url]: https://goswagger.io/go-openapi
[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer
[godoc-url]: http://pkg.go.dev/github.com/go-openapi/jsonpointer
-[slack-badge]: https://slackin.goswagger.io/badge.svg
-[slack-url]: https://slackin.goswagger.io
+[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png
+[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM
+[slack-url]: https://goswagger.slack.com/archives/C04R30YMU
[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg
[license-url]: https://github.com/go-openapi/jsonpointer/?tab=Apache-2.0-1-ov-file#readme
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.cliff.toml b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.cliff.toml
new file mode 100644
index 000000000..702629f5d
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.cliff.toml
@@ -0,0 +1,181 @@
+# git-cliff ~ configuration file
+# https://git-cliff.org/docs/configuration
+
+[changelog]
+header = """
+"""
+
+footer = """
+
+-----
+
+**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms**
+
+[![License][license-badge]][license-url]
+
+[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg
+[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme
+
+{%- macro remote_url() -%}
+ https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
+{%- endmacro -%}
+"""
+
+body = """
+{%- if version %}
+## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }}
+{%- else %}
+## [unreleased]
+{%- endif %}
+{%- if message %}
+ {%- raw %}\n{% endraw %}
+{{ message }}
+ {%- raw %}\n{% endraw %}
+{%- endif %}
+{%- if version %}
+ {%- if previous.version %}
+
+**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}>
+ {%- endif %}
+{%- else %}
+ {%- raw %}\n{% endraw %}
+{%- endif %}
+
+{%- if statistics %}{% if statistics.commit_count %}
+ {%- raw %}\n{% endraw %}
+{{ statistics.commit_count }} commits in this release.
+ {%- raw %}\n{% endraw %}
+{%- endif %}{% endif %}
+-----
+
+{%- for group, commits in commits | group_by(attribute="group") %}
+ {%- raw %}\n{% endraw %}
+### {{ group | upper_first }}
+ {%- raw %}\n{% endraw %}
+ {%- for commit in commits %}
+ {%- if commit.remote.pr_title %}
+ {%- set commit_message = commit.remote.pr_title %}
+ {%- else %}
+ {%- set commit_message = commit.message %}
+ {%- endif %}
+* {{ commit_message | split(pat="\n") | first | trim }}
+ {%- if commit.remote.username %}
+{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }})
+ {%- endif %}
+ {%- if commit.remote.pr_number %}
+{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }})
+ {%- endif %}
+{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }})
+ {%- endfor %}
+{%- endfor %}
+
+{%- if github %}
+{%- raw %}\n{% endraw -%}
+ {%- set all_contributors = github.contributors | length %}
+ {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %}
+-----
+
+### People who contributed to this release
+ {% endif %}
+ {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %}
+ {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
+* [@{{ contributor.username }}](https://github.com/{{ contributor.username }})
+ {%- endif %}
+ {%- endfor %}
+
+ {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
+-----
+ {%- raw %}\n{% endraw %}
+
+### New Contributors
+ {%- endif %}
+
+ {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
+ {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
+* @{{ contributor.username }} made their first contribution
+ {%- if contributor.pr_number %}
+ in [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+{%- endif %}
+
+{%- raw %}\n{% endraw %}
+
+{%- macro remote_url() -%}
+ https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
+{%- endmacro -%}
+"""
+# Remove leading and trailing whitespaces from the changelog's body.
+trim = true
+# Render body even when there are no releases to process.
+render_always = true
+# An array of regex based postprocessors to modify the changelog.
+postprocessors = [
+ # Replace the placeholder with a URL.
+ #{ pattern = '', replace = "https://github.com/orhun/git-cliff" },
+]
+# output file path
+# output = "test.md"
+
+[git]
+# Parse commits according to the conventional commits specification.
+# See https://www.conventionalcommits.org
+conventional_commits = false
+# Exclude commits that do not match the conventional commits specification.
+filter_unconventional = false
+# Require all commits to be conventional.
+# Takes precedence over filter_unconventional.
+require_conventional = false
+# Split commits on newlines, treating each line as an individual commit.
+split_commits = false
+# An array of regex based parsers to modify commit messages prior to further processing.
+commit_preprocessors = [
+ # Replace issue numbers with link templates to be updated in `changelog.postprocessors`.
+ #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"},
+ # Check spelling of the commit message using https://github.com/crate-ci/typos.
+ # If the spelling is incorrect, it will be fixed automatically.
+ #{ pattern = '.*', replace_command = 'typos --write-changes -' }
+]
+# Prevent commits that are breaking from being excluded by commit parsers.
+protect_breaking_commits = false
+# An array of regex based parsers for extracting data from the commit message.
+# Assigns commits to groups.
+# Optionally sets the commit's scope and can decide to exclude commits from further processing.
+commit_parsers = [
+ { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true },
+ { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true },
+ { field = "author.name", pattern = "dependabot*", group = "Updates" },
+ { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" },
+ { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" },
+ { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" },
+ { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" },
+ { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" },
+ { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" },
+ { message = "^test", group = "Testing" },
+ { message = "(^fix)|(panic)", group = "Fixed bugs" },
+ { message = "(^refact)|(rework)", group = "Refactor" },
+ { message = "(^[Pp]erf)|(performance)", group = "Performance" },
+ { message = "(^[Cc]hore)", group = "Miscellaneous tasks" },
+ { message = "^[Rr]evert", group = "Reverted changes" },
+ { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" },
+ { message = ".*", group = "Other" },
+]
+# Exclude commits that are not matched by any commit parser.
+filter_commits = false
+# An array of link parsers for extracting external references, and turning them into URLs, using regex.
+link_parsers = []
+# Include only the tags that belong to the current branch.
+use_branch_tags = false
+# Order releases topologically instead of chronologically.
+topo_order = false
+# Order releases topologically instead of chronologically.
+topo_order_commits = true
+# Order of commits in each group/release within the changelog.
+# Allowed values: newest, oldest
+sort_commits = "newest"
+# Process submodules commits
+recurse_submodules = false
+
+#[remote.github]
+#owner = "go-openapi"
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.editorconfig b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.editorconfig
new file mode 100644
index 000000000..3152da69a
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.golangci.yml
index 7cea1af8b..fdae591bc 100644
--- a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.golangci.yml
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/.golangci.yml
@@ -2,34 +2,17 @@ version: "2"
linters:
default: all
disable:
- - cyclop
- depguard
- - errchkjson
- - errorlint
- - exhaustruct
- - forcetypeassert
- funlen
- - gochecknoglobals
- - gochecknoinits
- - gocognit
- - godot
- godox
- - gosmopolitan
- - inamedparam
- #- intrange # disabled while < go1.22
- - ireturn
- - lll
- - musttag
- - nestif
+ - exhaustruct
- nlreturn
- nonamedreturns
- noinlineerr
- paralleltest
- recvcheck
- testpackage
- - thelper
- tparallel
- - unparam
- varnamelen
- whitespace
- wrapcheck
@@ -41,8 +24,15 @@ linters:
goconst:
min-len: 2
min-occurrences: 3
+ cyclop:
+ max-complexity: 20
gocyclo:
- min-complexity: 45
+ min-complexity: 20
+ exhaustive:
+ default-signifies-exhaustive: true
+ default-case-required: true
+ lll:
+ line-length: 180
exclusions:
generated: lax
presets:
@@ -58,6 +48,7 @@ formatters:
enable:
- gofmt
- goimports
+ - gofumpt
exclusions:
generated: lax
paths:
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md
new file mode 100644
index 000000000..9907d5d21
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md
@@ -0,0 +1,21 @@
+# Contributors
+
+- Repository: ['go-openapi/jsonreference']
+
+| Total Contributors | Total Contributions |
+| --- | --- |
+| 9 | 68 |
+
+| Username | All Time Contribution Count | All Commits |
+| --- | --- | --- |
+| @fredbi | 31 | https://github.com/go-openapi/jsonreference/commits?author=fredbi |
+| @casualjim | 25 | https://github.com/go-openapi/jsonreference/commits?author=casualjim |
+| @youyuanwu | 5 | https://github.com/go-openapi/jsonreference/commits?author=youyuanwu |
+| @olivierlemasle | 2 | https://github.com/go-openapi/jsonreference/commits?author=olivierlemasle |
+| @apelisse | 1 | https://github.com/go-openapi/jsonreference/commits?author=apelisse |
+| @gbjk | 1 | https://github.com/go-openapi/jsonreference/commits?author=gbjk |
+| @honza | 1 | https://github.com/go-openapi/jsonreference/commits?author=honza |
+| @Neo2308 | 1 | https://github.com/go-openapi/jsonreference/commits?author=Neo2308 |
+| @erraggy | 1 | https://github.com/go-openapi/jsonreference/commits?author=erraggy |
+
+ _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/NOTICE b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/NOTICE
index f9ad7e0f7..f3b51939a 100644
--- a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/NOTICE
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/NOTICE
@@ -8,12 +8,15 @@ by the go-swagger and go-openapi maintainers ("go-swagger maintainers").
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
+
You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0.
+
This software is copied from, derived from, and inspired by other original software products.
It ships with copies of other software which license terms are recalled below.
-The original sofware was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com).
+The original software was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com).
github.com/sigh-399/jsonpointer
===========================
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/README.md b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/README.md
index 2274a4b78..d479dbdc7 100644
--- a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/README.md
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/README.md
@@ -1,18 +1,39 @@
-# gojsonreference [](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [](https://codecov.io/gh/go-openapi/jsonreference)
+# jsonreference
-[](https://slackin.goswagger.io)
-[](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE)
-[](https://pkg.go.dev/github.com/go-openapi/jsonreference)
-[](https://goreportcard.com/report/github.com/go-openapi/jsonreference)
+
+[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url]
+
+
+
+[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url]
+
+
+[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge]
-An implementation of JSON Reference - Go language
+---
+
+An implementation of JSON Reference for golang.
## Status
-Feature complete. Stable API
+
+API is stable.
+
+## Import this library in your project
+
+```cmd
+go get github.com/go-openapi/jsonreference
+```
## Dependencies
+
* https://github.com/go-openapi/jsonpointer
+## Basic usage
+
+## Change log
+
+See
+
## References
* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
@@ -24,3 +45,55 @@ This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
See the license [NOTICE](./NOTICE), which recalls the licensing terms of all the pieces of software
on top of which it has been built.
+
+## Other documentation
+
+* [All-time contributors](./CONTRIBUTORS.md)
+* [Contributing guidelines](.github/CONTRIBUTING.md)
+* [Maintainers documentation](docs/MAINTAINERS.md)
+* [Code style](docs/STYLE.md)
+
+## Cutting a new release
+
+Maintainers can cut a new release by either:
+
+* running [this workflow](https://github.com/go-openapi/jsonreference/actions/workflows/bump-release.yml)
+* or pushing a semver tag
+ * signed tags are preferred
+ * The tag message is prepended to release notes
+
+
+[test-badge]: https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg
+[test-url]: https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml
+[cov-badge]: https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg
+[cov-url]: https://codecov.io/gh/go-openapi/jsonreference
+[vuln-scan-badge]: https://github.com/go-openapi/jsonreference/actions/workflows/scanner.yml/badge.svg
+[vuln-scan-url]: https://github.com/go-openapi/jsonreference/actions/workflows/scanner.yml
+[codeql-badge]: https://github.com/go-openapi/jsonreference/actions/workflows/codeql.yml/badge.svg
+[codeql-url]: https://github.com/go-openapi/jsonreference/actions/workflows/codeql.yml
+
+[release-badge]: https://badge.fury.io/gh/go-openapi%2Fjsonreference.svg
+[release-url]: https://badge.fury.io/gh/go-openapi%2Fjsonreference
+[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonreference.svg
+[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonreference
+
+[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/jsonreference
+[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/jsonreference
+[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/jsonreference
+[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/jsonreference
+
+[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F
+[doc-url]: https://goswagger.io/go-openapi
+[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/jsonreference
+[godoc-url]: http://pkg.go.dev/github.com/go-openapi/jsonreference
+[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png
+[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM
+[slack-url]: https://goswagger.slack.com/archives/C04R30YMU
+
+[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg
+[license-url]: https://github.com/go-openapi/jsonreference/?tab=Apache-2.0-1-ov-file#readme
+
+[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/jsonreference
+[goversion-url]: https://github.com/go-openapi/jsonreference/blob/master/go.mod
+[top-badge]: https://img.shields.io/github/languages/top/go-openapi/jsonreference
+[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/jsonreference/latest
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/SECURITY.md b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/SECURITY.md
new file mode 100644
index 000000000..2a7b6f091
--- /dev/null
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+This policy outlines the commitment and practices of the go-openapi maintainers regarding security.
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.22.x | :white_check_mark: |
+
+## Reporting a vulnerability
+
+If you become aware of a security vulnerability that affects the current repository,
+please report it privately to the maintainers.
+
+Please follow the instructions provided by github to
+[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability).
+
+TL;DR: on Github, navigate to the project's "Security" tab then click on "Report a vulnerability".
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
index ca79391dc..a08b47320 100644
--- a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
@@ -14,9 +14,11 @@ const (
defaultHTTPSPort = ":443"
)
-// Regular expressions used by the normalizations
-var rxPort = regexp.MustCompile(`(:\d+)/?$`)
-var rxDupSlashes = regexp.MustCompile(`/{2,}`)
+// Regular expressions used by the normalizations.
+var (
+ rxPort = regexp.MustCompile(`(:\d+)/?$`)
+ rxDupSlashes = regexp.MustCompile(`/{2,}`)
+)
// NormalizeURL will normalize the specified URL
// This was added to replace a previous call to the no longer maintained purell library:
diff --git a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/reference.go b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/reference.go
index 33d4798ca..6e3ae4995 100644
--- a/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/reference.go
+++ b/openshift/tests-extension/vendor/github.com/go-openapi/jsonreference/reference.go
@@ -18,7 +18,7 @@ const (
var ErrChildURL = errors.New("child url is nil")
-// Ref represents a json reference object
+// Ref represents a json reference object.
type Ref struct {
referenceURL *url.URL
referencePointer jsonpointer.Pointer
@@ -30,7 +30,7 @@ type Ref struct {
HasFullFilePath bool
}
-// New creates a new reference for the given string
+// New creates a new reference for the given string.
func New(jsonReferenceString string) (Ref, error) {
var r Ref
err := r.parse(jsonReferenceString)
@@ -38,7 +38,7 @@ func New(jsonReferenceString string) (Ref, error) {
}
// MustCreateRef parses the ref string and panics when it's invalid.
-// Use the New method for a version that returns an error
+// Use the New method for a version that returns an error.
func MustCreateRef(ref string) Ref {
r, err := New(ref)
if err != nil {
@@ -48,17 +48,17 @@ func MustCreateRef(ref string) Ref {
return r
}
-// GetURL gets the URL for this reference
+// GetURL gets the URL for this reference.
func (r *Ref) GetURL() *url.URL {
return r.referenceURL
}
-// GetPointer gets the json pointer for this reference
+// GetPointer gets the json pointer for this reference.
func (r *Ref) GetPointer() *jsonpointer.Pointer {
return &r.referencePointer
}
-// String returns the best version of the url for this reference
+// String returns the best version of the url for this reference.
func (r *Ref) String() string {
if r.referenceURL != nil {
return r.referenceURL.String()
@@ -71,7 +71,7 @@ func (r *Ref) String() string {
return r.referencePointer.String()
}
-// IsRoot returns true if this reference is a root document
+// IsRoot returns true if this reference is a root document.
func (r *Ref) IsRoot() bool {
return r.referenceURL != nil &&
!r.IsCanonical() &&
@@ -79,13 +79,13 @@ func (r *Ref) IsRoot() bool {
r.referenceURL.Fragment == ""
}
-// IsCanonical returns true when this pointer starts with http(s):// or file://
+// IsCanonical returns true when this pointer starts with http(s):// or file://.
func (r *Ref) IsCanonical() bool {
return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL)
}
// Inherits creates a new reference from a parent and a child
-// If the child cannot inherit from the parent, an error is returned
+// If the child cannot inherit from the parent, an error is returned.
func (r *Ref) Inherits(child Ref) (*Ref, error) {
childURL := child.GetURL()
parentURL := r.GetURL()
@@ -103,7 +103,7 @@ func (r *Ref) Inherits(child Ref) (*Ref, error) {
return &ref, nil
}
-// "Constructor", parses the given string JSON reference
+// "Constructor", parses the given string JSON reference.
func (r *Ref) parse(jsonReferenceString string) error {
parsed, err := url.Parse(jsonReferenceString)
if err != nil {
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.codespellignore b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.codespellignore
index 2b53a25e1..a6d0cbcc9 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -8,3 +8,4 @@ nam
valu
thirdparty
addOpt
+observ
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.golangci.yml b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.golangci.yml
index b01762ffc..1b1b2aff9 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -197,6 +197,9 @@ linters:
- float-compare
- go-require
- require-error
+ usetesting:
+ context-background: true
+ context-todo: true
exclusions:
generated: lax
presets:
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.lycheeignore b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.lycheeignore
index 532850588..994b677df 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -1,4 +1,5 @@
http://localhost
+https://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
@@ -6,4 +7,7 @@ https://github.com/open-telemetry/opentelemetry-go/projects
https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
-http://4.3.2.1:78/user/123
\ No newline at end of file
+http://4.3.2.1:78/user/123
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317
+# URL works, but it has blocked link checkers.
+https://dl.acm.org/doi/10.1145/198429.198435
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index f3abcfdc2..ecbe0582c 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -11,6 +11,74 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
+## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05
+
+### Added
+
+- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175)
+- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages.
+ This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287)
+- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`.
+ Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353)
+- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434)
+- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7459)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486)
+- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512)
+- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524)
+- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571)
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608)
+- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`.
+ All `Processor` implementations now include an `Enabled` method. (#7639)
+- The `go.opentelemetry.io/otel/semconv/v1.38.0` package.
+ The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions.
+ See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0.`(#7648)
+
+### Changed
+
+- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set.
+ Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266)
+- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302)
+- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306)
+- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`.
+ ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded.
+ Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default.
+ To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363)
+- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371)
+- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421)
+- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7427)
+- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. (#7438)
+- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types.
+ If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442)
+
+### Fixed
+
+- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them.
+ Attributes with duplicate keys will use the last value passed. (#7300)
+- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372)
+- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403)
+- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655)
+- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656)
+
+### Removed
+
+- Drop support for [Go 1.23]. (#7274)
+- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`.
+ The `Enabled` method has been added to the `Processor` interface instead.
+ All `Processor` implementations must now implement the `Enabled` method.
+ Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639)
+
## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
This release is the last to support [Go 1.23].
@@ -3430,8 +3498,10 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD
+[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0
[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
+[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
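
Among the OpenTelemetry v1.39.0 changes pulled in above, the changelog notes a new `WithInstrumentationAttributeSet` option in the `trace`, `metric`, and `log` packages that accepts a pre-constructed `attribute.Set` instead of a variadic attribute list. A rough sketch of how that might look for a tracer follows; the exact option signature is assumed from the changelog wording, and the scope name and attribute key are placeholders.

```go
package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	// Build the attribute set once; attribute.Set is safe to reuse concurrently.
	set := attribute.NewSet(
		attribute.String("component", "tests-extension"), // placeholder attribute
	)

	// Pre-1.39: trace.WithInstrumentationAttributes(attribute.String("component", "tests-extension"))
	// With 1.39 (assumed signature), the prebuilt set is passed directly.
	tracer := otel.Tracer(
		"example.com/instrumentation", // placeholder scope name
		trace.WithInstrumentationAttributeSet(set),
	)
	_ = tracer
}
```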
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 0b3ae855c..ff5e1f76e 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel
(This may print some warning about "build constraints exclude all Go
files", just ignore it.)
-This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
-can alternatively use `git` directly with:
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`.
+Alternatively, you can use `git` directly with:
```sh
git clone https://github.com/open-telemetry/opentelemetry-go
@@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go
that name is a kind of a redirector to GitHub that `go get` can
understand, but `git` does not.)
-This would put the project in the `opentelemetry-go` directory in
-current working directory.
+This will put the project in the `opentelemetry-go` directory within the current working directory.
Enter the newly created directory and add your fork as a new remote:
@@ -109,7 +108,7 @@ A PR is considered **ready to merge** when:
This is not enforced through automation, but needs to be validated by the
maintainer merging.
- * At least one of the qualified approvals need to be from an
+ * At least one of the qualified approvals needs to be from an
[Approver]/[Maintainer] affiliated with a different company than the author
of the PR.
* PRs introducing changes that have already been discussed and consensus
@@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
### Focus on Capabilities, Not Structure Compliance
OpenTelemetry is an evolving specification, one where the desires and
-use cases are clear, but the method to satisfy those uses cases are
+use cases are clear, but the methods to satisfy those use cases are
not.
As such, Contributions should provide functionality and behavior that
-conforms to the specification, but the interface and structure is
+conforms to the specification, but the interface and structure are
flexible.
It is preferable to have contributions follow the idioms of the
@@ -217,7 +216,7 @@ about dependency compatibility.
This project does not partition dependencies based on the environment (i.e.
`development`, `staging`, `production`).
-Only the dependencies explicitly included in the released modules have be
+Only the dependencies explicitly included in the released modules have been
tested and verified to work with the released code. No other guarantee is made
about the compatibility of other dependencies.
@@ -635,8 +634,8 @@ is not in their root name.
The use of internal packages should be scoped to a single module. A sub-module
should never import from a parent internal package. This creates a coupling
-between the two modules where a user can upgrade the parent without the child
-and if the internal package API has changed it will fail to upgrade[^3].
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed, it will fail to upgrade[^3].
There are two known exceptions to this rule:
@@ -657,7 +656,7 @@ this.
### Ignoring context cancellation
-OpenTelemetry API implementations need to ignore the cancellation of the context that are
+OpenTelemetry API implementations need to ignore the cancellation of the context that is
passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
Recording methods should not return an error describing the cancellation state of the context
when they complete, nor should they abort any work.
@@ -675,6 +674,441 @@ force flushing telemetry, shutting down a signal provider) the context cancellat
should be honored. This means all work done on behalf of the user provided context
should be canceled.
+### Observability
+
+OpenTelemetry Go SDK components should be instrumented to give users observability into the health and performance of the telemetry pipeline itself.
+This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications.
+
+This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components.
+
+#### Environment Variable Activation
+
+Observability features are currently experimental.
+They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable.
+This follows the established experimental feature pattern used throughout the SDK.
+
+Components should check for this environment variable using a consistent pattern:
+
+```go
+import "go.opentelemetry.io/otel/*/internal/x"
+
+if x.Observability.Enabled() {
+ // Initialize observability metrics
+}
+```
+
+**References**:
+
+- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go)
+- [sdk](./sdk/internal/x/x.go)
+
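+For reference, the following is a minimal, hypothetical sketch of such a gate.
+The real gates live in the internal `x` packages referenced above; the names here are illustrative only.
+
+```go
+// Package x is a hypothetical sketch of an experimental feature gate.
+package x
+
+import (
+	"os"
+	"strings"
+)
+
+// Observability gates the experimental observability instrumentation.
+var Observability = Feature{key: "OTEL_GO_X_OBSERVABILITY"}
+
+// Feature is an experimental feature controlled by an environment variable.
+type Feature struct {
+	key string
+}
+
+// Enabled reports whether the feature was turned on via its environment variable.
+func (f Feature) Enabled() bool {
+	v := strings.ToLower(strings.TrimSpace(os.Getenv(f.key)))
+	return v == "true"
+}
+```
+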
+#### Encapsulation
+
+Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`).
+It should not be mixed into the instrumented component.
+
+Prefer this:
+
+```go
+type SDKComponent struct {
+ inst *instrumentation
+}
+
+type instrumentation struct {
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+```
+
+To this:
+
+```go
+// ❌ Avoid this pattern.
+type SDKComponent struct {
+ /* other SDKComponent fields... */
+
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+```
+
+The instrumentation code should not bloat the code being instrumented.
+In practice, this means placing it in its own file, or in its own package if it is complex or reused.
+
+#### Initialization
+
+Instrumentation setup should be explicit, side-effect free, and local to the relevant component.
+Avoid relying on global or implicit [side effects][side-effect] for initialization.
+
+Encapsulate setup in constructor functions, ensuring clear ownership and scope:
+
+```go
+import (
+ "errors"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
+)
+
+type SDKComponent struct {
+ inst *instrumentation
+}
+
+func NewSDKComponent(config Config) (*SDKComponent, error) {
+ inst, err := newInstrumentation()
+ if err != nil {
+ return nil, err
+ }
+ return &SDKComponent{inst: inst}, nil
+}
+
+type instrumentation struct {
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+
+func newInstrumentation() (*instrumentation, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ meter := otel.GetMeterProvider().Meter(
+ "",
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ inst := &instrumentation{}
+
+ var err, e error
+ inst.inflight, e = otelconv.NewSDKComponentInflight(meter)
+ err = errors.Join(err, e)
+
+ inst.exported, e = otelconv.NewSDKComponentExported(meter)
+ err = errors.Join(err, e)
+
+ return inst, err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (c *Component) initObservability() {
+	if !x.Observability.Enabled() {
+ return
+ }
+
+ // Initialize observability metrics
+ c.inst = &instrumentation{/* ... */}
+}
+```
+
+[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science)
+
+#### Performance
+
+When observability is disabled, there should be little to no overhead.
+
+```go
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ if e.inst != nil {
+ attrs := expensiveOperation()
+ e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
+ }
+ // Export spans...
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ attrs := expensiveOperation()
+ e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
+ // Export spans...
+}
+
+func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) {
+ if i == nil || i.inflight == nil {
+ return
+ }
+ i.inflight.Add(ctx, count, metric.WithAttributes(attrs...))
+}
+```
+
+When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead.
+
+##### Attribute and Option Allocation Management
+
+Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes.
+
+```go
+var (
+ attrPool = sync.Pool{
+ New: func() any {
+ // Pre-allocate common capacity
+ knownCap := 8 // Adjust based on expected usage
+ s := make([]attribute.KeyValue, 0, knownCap)
+ // Return a pointer to avoid extra allocation on Put().
+ return &s
+ },
+ }
+
+ addOptPool = &sync.Pool{
+ New: func() any {
+ const n = 1 // WithAttributeSet
+ o := make([]metric.AddOption, 0, n)
+ // Return a pointer to avoid extra allocation on Put().
+ return &o
+ },
+ }
+)
+
+func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) {
+ attrs := attrPool.Get().(*[]attribute.KeyValue)
+ defer func() {
+ *attrs = (*attrs)[:0] // Reset.
+ attrPool.Put(attrs)
+ }()
+
+ *attrs = append(*attrs, baseAttrs...)
+ // Add any dynamic attributes.
+ *attrs = append(*attrs, semconv.OTelComponentName("exporter-1"))
+
+ addOpt := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *addOpt = (*addOpt)[:0]
+ addOptPool.Put(addOpt)
+ }()
+
+ set := attribute.NewSet(*attrs...)
+ *addOpt = append(*addOpt, metric.WithAttributeSet(set))
+
+ i.counter.Add(ctx, value, *addOpt...)
+}
+```
+
+Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used.
+This amortizes the cost of allocation and synchronization.
+Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness.
+
+[`sync.Pool`]: https://pkg.go.dev/sync#Pool
+
+##### Cache common attribute sets for repeated measurements
+
+If a static set of attributes is used for measurements and it is known at compile time, pre-compute and cache these attributes.
+
+```go
+type spanLiveSetKey struct {
+ sampled bool
+}
+
+var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
+ {true}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordAndSample,
+ ),
+ ),
+ {false}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordOnly,
+ ),
+ ),
+}
+
+func spanLiveSet(sampled bool) attribute.Set {
+ key := spanLiveSetKey{sampled: sampled}
+ return spanLiveSetCache[key]
+}
+```
+
+##### Benchmarking
+
+Always provide benchmarks when introducing or refactoring instrumentation.
+Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios:
+
+```go
+func BenchmarkExportSpans(b *testing.B) {
+ scenarios := []struct {
+ name string
+ obsEnabled bool
+ }{
+ {"ObsDisabled", false},
+ {"ObsEnabled", true},
+ }
+
+ for _, scenario := range scenarios {
+ b.Run(scenario.name, func(b *testing.B) {
+ b.Setenv(
+ "OTEL_GO_X_OBSERVABILITY",
+ strconv.FormatBool(scenario.obsEnabled),
+ )
+
+ exporter := NewExporter()
+ spans := generateTestSpans(100)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ _ = exporter.ExportSpans(context.Background(), spans)
+ }
+ })
+ }
+}
+```
+
+#### Error Handling and Robustness
+
+Errors should be reported back to the caller when possible, and partial failures should be handled gracefully.
+
+```go
+func newInstrumentation() (*instrumentation, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ m := otel.GetMeterProvider().Meter(/* initialize meter */)
+ counter, err := otelconv.NewSDKComponentCounter(m)
+ // Use the partially initialized counter if available.
+ i := &instrumentation{counter: counter}
+ // Return any error to the caller.
+ return i, err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func newInstrumentation() *instrumentation {
+ if !x.Observability.Enabled() {
+		return nil
+ }
+
+ m := otel.GetMeterProvider().Meter(/* initialize meter */)
+ counter, err := otelconv.NewSDKComponentCounter(m)
+ if err != nil {
+ // ❌ Do not dump the error to the OTel Handler. Return it to the
+ // caller.
+ otel.Handle(err)
+ // ❌ Do not return nil if we can still use the partially initialized
+ // counter.
+ return nil
+ }
+ return &instrumentation{counter: counter}
+}
+```
+
+If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`.
+
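+For example, a constructor that cannot return an error has no way to surface the instrumentation setup failure to its caller.
+The following is an illustrative sketch (the `SpanProcessor` type and the `newInstrumentation` helper are hypothetical):
+
+```go
+// NewSpanProcessor cannot return an error, so the instrumentation setup
+// error is reported via otel.Handle instead of being silently dropped.
+func NewSpanProcessor() *SpanProcessor {
+	inst, err := newInstrumentation()
+	if err != nil {
+		otel.Handle(err)
+	}
+	// A partially initialized instrumentation is still usable.
+	return &SpanProcessor{inst: inst}
+}
+```
+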
+#### Context Propagation
+
+Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context:
+
+```go
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ // Use the provided context for observability measurements
+ e.inst.recordSpanExportStarted(ctx, len(spans))
+
+ err := e.doExport(ctx, spans)
+
+ if err != nil {
+ e.inst.recordSpanExportFailed(ctx, len(spans), err)
+ } else {
+ e.inst.recordSpanExportSucceeded(ctx, len(spans))
+ }
+
+ return err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ // ❌ Do not break the context propagation.
+ e.inst.recordSpanExportStarted(context.Background(), len(spans))
+
+ err := e.doExport(ctx, spans)
+
+ /* ... */
+
+ return err
+}
+```
+
+#### Semantic Conventions Compliance
+
+All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md).
+
+Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go).
+
+##### Component Identification
+
+Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes).
+
+If a component is not a well-known type specified in the semantic conventions, use the package-path-qualified type name as a stable identifier.
+
+```go
+componentType := "go.opentelemetry.io/otel/sdk/trace.Span"
+```
+
+```go
+// ❌ Do not do this.
+componentType := "trace-span"
+```
+
+The component name should be a stable unique identifier for the specific instance of the component.
+
+Use a global counter to ensure uniqueness if necessary.
+
+```go
+// Unique 0-based ID counter for component instances.
+var componentIDCounter atomic.Int64
+
+// nextID returns the next unique ID for a component.
+func nextID() int64 {
+ return componentIDCounter.Add(1) - 1
+}
+
+// componentName returns a unique name for the component instance.
+func componentName() attribute.KeyValue {
+ id := nextID()
+ name := fmt.Sprintf("%s/%d", componentType, id)
+ return semconv.OTelComponentName(name)
+}
+```
+
+The component ID will need to be resettable for deterministic testing.
+If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter.
+See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference.
+
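+The following is an illustrative sketch of such an internal counter package (package and function names are hypothetical):
+
+```go
+// Package counter manages instance IDs for a single component type.
+package counter
+
+import "sync/atomic"
+
+var componentID atomic.Int64
+
+// Next returns the next unique 0-based component instance ID.
+func Next() int64 { return componentID.Add(1) - 1 }
+
+// Reset resets the counter. It is intended only for deterministic tests.
+func Reset() { componentID.Store(0) }
+```
+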
+#### Testing
+
+Use deterministic testing with isolated state:
+
+```go
+func TestObservability(t *testing.T) {
+ // Restore state after test to ensure this does not affect other tests.
+ prev := otel.GetMeterProvider()
+ t.Cleanup(func() { otel.SetMeterProvider(prev) })
+
+ // Isolate the meter provider for deterministic testing
+ reader := metric.NewManualReader()
+ meterProvider := metric.NewMeterProvider(metric.WithReader(reader))
+ otel.SetMeterProvider(meterProvider)
+
+ // Use t.Setenv to ensure environment variable is restored after test.
+ t.Setenv("OTEL_GO_X_OBSERVABILITY", "true")
+
+ // Reset component ID counter to ensure deterministic component names.
+ componentIDCounter.Store(0)
+
+ /* ... test code ... */
+}
+```
+
+Test order should not affect results.
+Ensure that any global state (e.g. component ID counters) is reset between tests.
+
## Approvers and Maintainers
### Maintainers
@@ -696,7 +1130,6 @@ For more information about the approver role, see the [community repository](htt
### Triagers
- [Alex Kats](https://github.com/akats7), Capital One
-- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
@@ -704,6 +1137,7 @@ For more information about the triager role, see the [community repository](http
- [Aaron Clawson](https://github.com/MadVikingGod)
- [Anthony Mirabella](https://github.com/Aneurysm9)
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes)
- [Chester Cheung](https://github.com/hanyuancheung)
- [Evan Torrie](https://github.com/evantorrie)
- [Gustavo Silva Paiva](https://github.com/paivagustavo)
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/Makefile b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/Makefile
index bc0f1f92d..44870248c 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/Makefile
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/Makefile
@@ -146,11 +146,12 @@ build-tests/%:
# Tests
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz
.PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
+test-fuzz: ARGS=-fuzztime=10s -fuzz
test-verbose: ARGS=-v -race
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
test-concurrent-safe: TIMEOUT=120
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/README.md b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/README.md
index 6b7ab5f21..c63359543 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/README.md
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/README.md
@@ -55,25 +55,18 @@ Currently, this project supports the following environments.
|----------|------------|--------------|
| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 |
-| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.25 | 386 |
| Ubuntu | 1.24 | 386 |
-| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.24 | arm64 |
-| Ubuntu | 1.23 | arm64 |
-| macOS 13 | 1.25 | amd64 |
-| macOS 13 | 1.24 | amd64 |
-| macOS 13 | 1.23 | amd64 |
+| macOS | 1.25 | amd64 |
+| macOS | 1.24 | amd64 |
| macOS | 1.25 | arm64 |
| macOS | 1.24 | arm64 |
-| macOS | 1.23 | arm64 |
| Windows | 1.25 | amd64 |
| Windows | 1.24 | amd64 |
-| Windows | 1.23 | amd64 |
| Windows | 1.25 | 386 |
| Windows | 1.24 | 386 |
-| Windows | 1.23 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/RELEASING.md b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/RELEASING.md
index 1ddcdef03..861756fd7 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit
## Breaking changes validation
-You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API.
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
@@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
```
3. Update the [Changelog](./CHANGELOG.md).
- - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+ - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand.
To verify this, you can look directly at the commits since the ``.
```
@@ -107,34 +107,50 @@ It is critical you make sure the version you push upstream is correct.
...
```
-## Release
+## Sign artifacts
-Finally create a Release for the new `` on GitHub.
-The release body should include all the release notes from the Changelog for this release.
+To ensure we comply with CNCF best practices, we need to sign the release artifacts.
-### Sign the Release Artifact
+Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag.
+Both archives need to be signed with your GPG key.
-To ensure we comply with CNCF best practices, we need to sign the release artifact.
-The tarball attached to the GitHub release needs to be signed with your GPG key.
+You can use [this script] to verify the contents of the archives before signing them.
-Follow [these steps] to sign the release artifact and upload it to GitHub.
-You can use [this script] to verify the contents of the tarball before signing it.
+To find your GPG key ID, run:
-Be sure to use the correct GPG key when signing the release artifact.
+```terminal
+gpg --list-secret-keys --keyid-format=long
+```
+
+The key ID is the 16-character string after `sec rsa4096/` (or similar).
+
+Set environment variables and sign both artifacts:
```terminal
-gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz
+export VERSION="" # e.g., v1.32.0
+export KEY_ID=""
+
+gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz
+gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip
```
-You can verify the signature with:
+You can verify the signatures with:
```terminal
-gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz
+gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz
+gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip
```
-[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
[this script]: https://github.com/MrAlias/attest-sh
+## Release
+
+Finally create a Release for the new `` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+***IMPORTANT***: GitHub Releases are immutable once created.
+You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later.
+
## Post-Release
### Contrib Repository
@@ -160,14 +176,6 @@ This helps track what changes were included in each release.
Once all related issues and PRs have been added to the milestone, close the milestone.
-### Demo Repository
-
-Bump the dependencies in the following Go services:
-
-- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
-
### Close the `Version Release` issue
Once the todo list in the `Version Release` issue is complete, close the issue.
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/VERSIONING.md b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/VERSIONING.md
index b8cb605c1..b27c9e84f 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/VERSIONING.md
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -83,7 +83,7 @@ is designed so the following goals can be achieved.
in either the module path or the import path.
* In addition to public APIs, telemetry produced by stable instrumentation
will remain stable and backwards compatible. This is to avoid breaking
- alerts and dashboard.
+ alerts and dashboards.
* Modules will be used to encapsulate instrumentation, detectors, exporters,
propagators, and any other independent sets of related components.
* Experimental modules still under active development will be versioned at
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index 6333d34b3..6cc1a1655 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -16,7 +16,7 @@ type (
// set into a wire representation.
Encoder interface {
// Encode returns the serialized encoding of the attribute set using
- // its Iterator. This result may be cached by a attribute.Set.
+ // its Iterator. This result may be cached by an attribute.Set.
Encode(iterator Iterator) string
// ID returns a value that is unique for each class of attribute
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/hash.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/hash.go
new file mode 100644
index 000000000..6aa69aeae
--- /dev/null
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/hash.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.opentelemetry.io/otel/attribute/internal/xxhash"
+)
+
+// Type identifiers. These identifiers are hashed before the value of the
+// corresponding type. This is done to distinguish values that are hashed with
+// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and
+// int64(0)).
+//
+// These are all 8 byte length strings converted to a uint64 representation. A
+// uint64 is used instead of the string directly as an optimization, it avoids
+// the for loop in [xxhash] which adds minor overhead.
+const (
+ boolID uint64 = 7953749933313450591 // "_boolean" (little endian)
+ int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian)
+ float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian)
+ stringID uint64 = 6874584755375207263 // "_string_" (little endian)
+ boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian)
+ int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian)
+ float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian)
+ stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian)
+)
+
+// hashKVs returns a new xxHash64 hash of kvs.
+func hashKVs(kvs []KeyValue) uint64 {
+ h := xxhash.New()
+ for _, kv := range kvs {
+ h = hashKV(h, kv)
+ }
+ return h.Sum64()
+}
+
+// hashKV returns the xxHash64 hash of kv with h as the base.
+func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash {
+ h = h.String(string(kv.Key))
+
+ switch kv.Value.Type() {
+ case BOOL:
+ h = h.Uint64(boolID)
+ h = h.Uint64(kv.Value.numeric)
+ case INT64:
+ h = h.Uint64(int64ID)
+ h = h.Uint64(kv.Value.numeric)
+ case FLOAT64:
+ h = h.Uint64(float64ID)
+ // Assumes numeric stored with math.Float64bits.
+ h = h.Uint64(kv.Value.numeric)
+ case STRING:
+ h = h.Uint64(stringID)
+ h = h.String(kv.Value.stringly)
+ case BOOLSLICE:
+ h = h.Uint64(boolSliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Bool(rv.Index(i).Bool())
+ }
+ case INT64SLICE:
+ h = h.Uint64(int64SliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Int64(rv.Index(i).Int())
+ }
+ case FLOAT64SLICE:
+ h = h.Uint64(float64SliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Float64(rv.Index(i).Float())
+ }
+ case STRINGSLICE:
+ h = h.Uint64(stringSliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.String(rv.Index(i).String())
+ }
+ case INVALID:
+ default:
+ // Logging is an alternative, but using the internal logger here
+ // causes an import cycle so it is not done.
+ v := kv.Value.AsInterface()
+ msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v)
+ panic(msg)
+ }
+ return h
+}
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go
new file mode 100644
index 000000000..113a97838
--- /dev/null
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package xxhash provides a wrapper around the xxhash library for attribute hashing.
+package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash"
+
+import (
+ "encoding/binary"
+ "math"
+
+ "github.com/cespare/xxhash/v2"
+)
+
+// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values.
+type Hash struct {
+ d *xxhash.Digest
+}
+
+// New returns a new initialized xxHash64 hasher.
+func New() Hash {
+ return Hash{d: xxhash.New()}
+}
+
+func (h Hash) Uint64(val uint64) Hash {
+ var buf [8]byte
+ binary.LittleEndian.PutUint64(buf[:], val)
+ // errors from Write are always nil for xxhash
+ // if it returns an err then panic
+ _, err := h.d.Write(buf[:])
+ if err != nil {
+ panic("xxhash write of uint64 failed: " + err.Error())
+ }
+ return h
+}
+
+func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function.
+ if val {
+ return h.Uint64(1)
+ }
+ return h.Uint64(0)
+}
+
+func (h Hash) Float64(val float64) Hash {
+ return h.Uint64(math.Float64bits(val))
+}
+
+func (h Hash) Int64(val int64) Hash {
+ return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing.
+}
+
+func (h Hash) String(val string) Hash {
+ // errors from WriteString are always nil for xxhash
+ // if it returns an err then panic
+ _, err := h.d.WriteString(val)
+ if err != nil {
+ panic("xxhash write of string failed: " + err.Error())
+ }
+ return h
+}
+
+// Sum64 returns the current hash value.
+func (h Hash) Sum64() uint64 {
+ return h.d.Sum64()
+}
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/set.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/set.go
index 64735d382..911d557ee 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -9,6 +9,8 @@ import (
"reflect"
"slices"
"sort"
+
+ "go.opentelemetry.io/otel/attribute/internal/xxhash"
)
type (
@@ -23,19 +25,19 @@ type (
// the Equals method to ensure stable equivalence checking.
//
// Users should also use the Distinct returned from Equivalent as a map key
- // instead of a Set directly. In addition to that type providing guarantees
- // on stable equivalence, it may also provide performance improvements.
+ // instead of a Set directly. Set has relatively poor performance when used
+ // as a map key compared to Distinct.
Set struct {
- equivalent Distinct
+ hash uint64
+ data any
}
- // Distinct is a unique identifier of a Set.
+ // Distinct is an identifier of a Set which is very likely to be unique.
//
- // Distinct is designed to ensure equivalence stability: comparisons will
- // return the same value across versions. For this reason, Distinct should
- // always be used as a map key instead of a Set.
+	// Distinct should be used as a map key instead of a Set to provide better
+ // performance for map operations.
Distinct struct {
- iface any
+ hash uint64
}
// Sortable implements sort.Interface, used for sorting KeyValue.
@@ -46,15 +48,34 @@ type (
Sortable []KeyValue
)
+// Compile time check these types remain comparable.
+var (
+ _ = isComparable(Set{})
+ _ = isComparable(Distinct{})
+)
+
+func isComparable[T comparable](t T) T { return t }
+
var (
// keyValueType is used in computeDistinctReflect.
keyValueType = reflect.TypeOf(KeyValue{})
- // emptySet is returned for empty attribute sets.
- emptySet = &Set{
- equivalent: Distinct{
- iface: [0]KeyValue{},
- },
+ // emptyHash is the hash of an empty set.
+ emptyHash = xxhash.New().Sum64()
+
+ // userDefinedEmptySet is an empty set. It was mistakenly exposed to users
+ // as something they can assign to, so it must remain addressable and
+ // mutable.
+ //
+ // This is kept for backwards compatibility, but should not be used in new code.
+ userDefinedEmptySet = &Set{
+ hash: emptyHash,
+ data: [0]KeyValue{},
+ }
+
+ emptySet = Set{
+ hash: emptyHash,
+ data: [0]KeyValue{},
}
)
@@ -62,33 +83,35 @@ var (
//
// This is a convenience provided for optimized calling utility.
func EmptySet() *Set {
- return emptySet
-}
-
-// reflectValue abbreviates reflect.ValueOf(d).
-func (d Distinct) reflectValue() reflect.Value {
- return reflect.ValueOf(d.iface)
+ // Continue to return the pointer to the user-defined empty set for
+ // backwards-compatibility.
+ //
+ // New code should not use this, instead use emptySet.
+ return userDefinedEmptySet
}
// Valid reports whether this value refers to a valid Set.
-func (d Distinct) Valid() bool {
- return d.iface != nil
+func (d Distinct) Valid() bool { return d.hash != 0 }
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (l Set) reflectValue() reflect.Value {
+ return reflect.ValueOf(l.data)
}
// Len returns the number of attributes in this set.
func (l *Set) Len() int {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return 0
}
- return l.equivalent.reflectValue().Len()
+ return l.reflectValue().Len()
}
// Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return KeyValue{}, false
}
- value := l.equivalent.reflectValue()
+ value := l.reflectValue()
if idx >= 0 && idx < value.Len() {
// Note: The Go compiler successfully avoids an allocation for
@@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) {
// Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return Value{}, false
}
- rValue := l.equivalent.reflectValue()
+ rValue := l.reflectValue()
vlen := rValue.Len()
idx := sort.Search(vlen, func(idx int) bool {
@@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue {
return iter.ToSlice()
}
-// Equivalent returns a value that may be used as a map key. The Distinct type
-// guarantees that the result will equal the equivalent. Distinct value of any
+// Equivalent returns a value that may be used as a map key. Equal Distinct
+// values very likely identify equal Sets. The result equals the Distinct of any
// attribute set with the same elements as this, where sets are made unique by
// choosing the last value in the input for any given key.
func (l *Set) Equivalent() Distinct {
- if l == nil || !l.equivalent.Valid() {
- return emptySet.equivalent
+ if l == nil || l.hash == 0 {
+ return Distinct{hash: emptySet.hash}
}
- return l.equivalent
+ return Distinct{hash: l.hash}
}
// Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
- return l.Equivalent() == o.Equivalent()
+ if l.Equivalent() != o.Equivalent() {
+ return false
+ }
+ if l == nil || l.hash == 0 {
+ l = &emptySet
+ }
+ if o == nil || o.hash == 0 {
+ o = &emptySet
+ }
+ return l.data == o.data
}
// Encoded returns the encoded form of this set, according to encoder.
@@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string {
return encoder.Encode(l.Iter())
}
-func empty() Set {
- return Set{
- equivalent: emptySet.equivalent,
- }
-}
-
// NewSet returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
@@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// Check for empty set.
if len(kvs) == 0 {
- return empty(), nil
+ return emptySet, nil
}
// Stable sort so the following de-duplication can implement
@@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if filter != nil {
if div := filteredToFront(kvs, filter); div != 0 {
- return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+ return newSet(kvs[div:]), kvs[:div]
}
}
- return Set{equivalent: computeDistinct(kvs)}, nil
+ return newSet(kvs), nil
}
// NewSetWithSortableFiltered returns a new Set.
@@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if first == 0 {
// It is safe to assume len(slice) >= 1 given we found at least one
// attribute above that needs to be filtered out.
- return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+ return newSet(slice[1:]), slice[:1]
}
// Move the filtered slice[first] to the front (preserving order).
@@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
// Do not re-evaluate re(slice[first+1:]).
div := filteredToFront(slice[1:first+1], re) + 1
- return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
+ return newSet(slice[div:]), slice[:div]
}
-// computeDistinct returns a Distinct using either the fixed- or
-// reflect-oriented code path, depending on the size of the input. The input
-// slice is assumed to already be sorted and de-duplicated.
-func computeDistinct(kvs []KeyValue) Distinct {
- iface := computeDistinctFixed(kvs)
- if iface == nil {
- iface = computeDistinctReflect(kvs)
+// newSet returns a new set based on the sorted and uniqued kvs.
+func newSet(kvs []KeyValue) Set {
+ s := Set{
+ hash: hashKVs(kvs),
+ data: computeDataFixed(kvs),
}
- return Distinct{
- iface: iface,
+ if s.data == nil {
+ s.data = computeDataReflect(kvs)
}
+ return s
}
-// computeDistinctFixed computes a Distinct for small slices. It returns nil
-// if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) any {
+// computeDataFixed computes a Set data for small slices. It returns nil if the
+// input is too large for this code path.
+func computeDataFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) any {
}
}
-// computeDistinctReflect computes a Distinct using reflection, works for any
-// size input.
-func computeDistinctReflect(kvs []KeyValue) any {
+// computeDataReflect computes a Set data using reflection, works for any size
+// input.
+func computeDataReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@@ -383,7 +408,7 @@ func computeDistinctReflect(kvs []KeyValue) any {
// MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) {
- return json.Marshal(l.equivalent.iface)
+ return json.Marshal(l.data)
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/type_string.go
index e584b2477..24f1fa37d 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/type_string.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
func (i Type) String() string {
- if i < 0 || i >= Type(len(_Type_index)-1) {
+ idx := int(i) - 0
+ if i < 0 || idx >= len(_Type_index)-1 {
return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
}
- return _Type_name[_Type_index[i]:_Type_index[i+1]]
+ return _Type_name[_Type_index[idx]:_Type_index[idx+1]]
}
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index f83a448ec..78e98c4c0 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// If we couldn't find any valid key character,
// it means the key is either empty or invalid.
if keyStart == keyEnd {
- return
+ return p, ok
}
// Skip spaces after the key: " key< >= value ".
@@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// A key can have no value, like: " key ".
ok = true
p.key = s[keyStart:keyEnd]
- return
+ return p, ok
}
// If we have not reached the end and we can't find the '=' delimiter,
// it means the property is invalid.
if s[index] != keyValueDelimiter[0] {
- return
+ return p, ok
}
// Attempting to parse the value.
@@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// we have not reached the end, it means the property is
// invalid, something like: " key = value value1".
if index != len(s) {
- return
+ return p, ok
}
// Decode a percent-encoded value.
rawVal := s[valueStart:valueEnd]
unescapeVal, err := url.PathUnescape(rawVal)
if err != nil {
- return
+ return p, ok
}
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
@@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
p.hasValue = true
p.value = value
- return
+ return p, ok
}
func skipSpace(s string, offset int) int {
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
index a311fbb48..cadb87cc0 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
-FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver
+FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index adb37b5b0..6db969f73 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -105,7 +105,7 @@ type delegatedInstrument interface {
setDelegate(metric.Meter)
}
-// instID are the identifying properties of a instrument.
+// instID are the identifying properties of an instrument.
type instID struct {
// name is the name of the stream.
name string
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/metric.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/metric.go
index 1e6473b32..527d9aec8 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/metric.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/metric.go
@@ -11,7 +11,7 @@ import (
// Meter returns a Meter from the global MeterProvider. The name must be the
// name of the library providing instrumentation. This name may be the same as
// the instrumented code only if that code provides built-in instrumentation.
-// If the name is empty, then a implementation defined default name will be
+// If the name is empty, then an implementation defined default name will be
// used instead.
//
// If this is called before a global MeterProvider is registered the returned
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/metric/config.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/metric/config.go
index d9e3b13e4..e42dd6e70 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/metric/config.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -3,7 +3,11 @@
package metric // import "go.opentelemetry.io/otel/metric"
-import "go.opentelemetry.io/otel/attribute"
+import (
+ "slices"
+
+ "go.opentelemetry.io/otel/attribute"
+)
// MeterConfig contains options for Meters.
type MeterConfig struct {
@@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption {
})
}
-// WithInstrumentationAttributes sets the instrumentation attributes.
+// WithInstrumentationAttributes adds the instrumentation attributes.
+//
+// This is equivalent to calling [WithInstrumentationAttributeSet] with an
+// [attribute.Set] created from a clone of the passed attributes.
+// [WithInstrumentationAttributeSet] is recommended for more control.
//
-// The passed attributes will be de-duplicated.
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
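+//
+// For example (illustrative sketch; the second value wins for the duplicate
+// key "k"):
+//
+//	m := provider.Meter("lib",
+//		metric.WithInstrumentationAttributes(attribute.String("k", "v1")),
+//		metric.WithInstrumentationAttributes(attribute.String("k", "v2")),
+//	)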
func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
+ set := attribute.NewSet(slices.Clone(attr)...)
+ return WithInstrumentationAttributeSet(set)
+}
+
+// WithInstrumentationAttributeSet adds the instrumentation attributes.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
+func WithInstrumentationAttributeSet(set attribute.Set) MeterOption {
+ if set.Len() == 0 {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ return config
+ })
+ }
+
return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.attrs = attribute.NewSet(attr...)
+ if config.attrs.Len() == 0 {
+ config.attrs = set
+ } else {
+ config.attrs = mergeSets(config.attrs, set)
+ }
return config
})
}
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 6692d2665..271ab71f1 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -111,7 +111,7 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
}
// Clear all flags other than the trace-context supported sampling bit.
- scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+ scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked.
// Ignore the error returned here. Failure to parse tracestate MUST NOT
// affect the parsing of traceparent according to the W3C tracecontext
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
index 666bded4b..267979c05 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
@@ -4,28 +4,53 @@
package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
import (
- "fmt"
"reflect"
"go.opentelemetry.io/otel/attribute"
)
// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+//
+// If err is nil, the returned attribute has the default value
+// [ErrorTypeOther].
+//
+// If err's type has the method
+//
+// ErrorType() string
+//
+// then the returned attribute has the value of err.ErrorType(). Otherwise, the
+// returned attribute has a value derived from the concrete type of err.
+//
+// The key of the returned attribute is [ErrorTypeKey].
func ErrorType(err error) attribute.KeyValue {
if err == nil {
return ErrorTypeOther
}
- t := reflect.TypeOf(err)
- var value string
- if t.PkgPath() == "" && t.Name() == "" {
- // Likely a builtin type.
- value = t.String()
- } else {
- value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+
+ return ErrorTypeKey.String(errorType(err))
+}
+
+func errorType(err error) string {
+ var s string
+ if et, ok := err.(interface{ ErrorType() string }); ok {
+ // Prioritize the ErrorType method if available.
+ s = et.ErrorType()
}
+ if s == "" {
+ // Fallback to reflection if the ErrorType method is not supported or
+ // returns an empty value.
- if value == "" {
- return ErrorTypeOther
+ t := reflect.TypeOf(err)
+ pkg, name := t.PkgPath(), t.Name()
+ if pkg != "" && name != "" {
+ s = pkg + "." + name
+ } else {
+ // The type has no package path or name (predeclared, not-defined,
+ // or alias for a not-defined type).
+ //
+ // This is not guaranteed to be unique, but is a best effort.
+ s = t.String()
+ }
}
- return ErrorTypeKey.String(value)
+ return s
}
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
index 55bde895d..a0ddf652d 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
@@ -91,6 +91,11 @@ type ClientActiveRequests struct {
metric.Int64UpDownCounter
}
+var newClientActiveRequestsOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP requests."),
+ metric.WithUnit("{request}"),
+}
+
// NewClientActiveRequests returns a new ClientActiveRequests instrument.
func NewClientActiveRequests(
m metric.Meter,
@@ -101,15 +106,18 @@ func NewClientActiveRequests(
return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientActiveRequestsOpts
+ } else {
+ opt = append(opt, newClientActiveRequestsOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"http.client.active_requests",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("Number of active HTTP requests."),
- metric.WithUnit("{request}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
+ return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
}
return ClientActiveRequests{i}, nil
}
@@ -223,6 +231,11 @@ type ClientConnectionDuration struct {
metric.Float64Histogram
}
+var newClientConnectionDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
+ metric.WithUnit("s"),
+}
+
// NewClientConnectionDuration returns a new ClientConnectionDuration instrument.
func NewClientConnectionDuration(
m metric.Meter,
@@ -233,15 +246,18 @@ func NewClientConnectionDuration(
return ClientConnectionDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientConnectionDurationOpts
+ } else {
+ opt = append(opt, newClientConnectionDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"http.client.connection.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientConnectionDuration{noop.Float64Histogram{}}, err
+ return ClientConnectionDuration{noop.Float64Histogram{}}, err
}
return ClientConnectionDuration{i}, nil
}
@@ -310,6 +326,7 @@ func (m ClientConnectionDuration) Record(
func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -353,6 +370,11 @@ type ClientOpenConnections struct {
metric.Int64UpDownCounter
}
+var newClientOpenConnectionsOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
+ metric.WithUnit("{connection}"),
+}
+
// NewClientOpenConnections returns a new ClientOpenConnections instrument.
func NewClientOpenConnections(
m metric.Meter,
@@ -363,15 +385,18 @@ func NewClientOpenConnections(
return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientOpenConnectionsOpts
+ } else {
+ opt = append(opt, newClientOpenConnectionsOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"http.client.open_connections",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
- metric.WithUnit("{connection}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
+ return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
}
return ClientOpenConnections{i}, nil
}
@@ -488,6 +513,11 @@ type ClientRequestBodySize struct {
metric.Int64Histogram
}
+var newClientRequestBodySizeOpts = []metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client request bodies."),
+ metric.WithUnit("By"),
+}
+
// NewClientRequestBodySize returns a new ClientRequestBodySize instrument.
func NewClientRequestBodySize(
m metric.Meter,
@@ -498,15 +528,18 @@ func NewClientRequestBodySize(
return ClientRequestBodySize{noop.Int64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientRequestBodySizeOpts
+ } else {
+ opt = append(opt, newClientRequestBodySizeOpts...)
+ }
+
i, err := m.Int64Histogram(
"http.client.request.body.size",
- append([]metric.Int64HistogramOption{
- metric.WithDescription("Size of HTTP client request bodies."),
- metric.WithUnit("By"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientRequestBodySize{noop.Int64Histogram{}}, err
+ return ClientRequestBodySize{noop.Int64Histogram{}}, err
}
return ClientRequestBodySize{i}, nil
}
@@ -593,6 +626,7 @@ func (m ClientRequestBodySize) Record(
func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -662,6 +696,11 @@ type ClientRequestDuration struct {
metric.Float64Histogram
}
+var newClientRequestDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP client requests."),
+ metric.WithUnit("s"),
+}
+
// NewClientRequestDuration returns a new ClientRequestDuration instrument.
func NewClientRequestDuration(
m metric.Meter,
@@ -672,15 +711,18 @@ func NewClientRequestDuration(
return ClientRequestDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientRequestDurationOpts
+ } else {
+ opt = append(opt, newClientRequestDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"http.client.request.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("Duration of HTTP client requests."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientRequestDuration{noop.Float64Histogram{}}, err
+ return ClientRequestDuration{noop.Float64Histogram{}}, err
}
return ClientRequestDuration{i}, nil
}
@@ -753,6 +795,7 @@ func (m ClientRequestDuration) Record(
func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -822,6 +865,11 @@ type ClientResponseBodySize struct {
metric.Int64Histogram
}
+var newClientResponseBodySizeOpts = []metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client response bodies."),
+ metric.WithUnit("By"),
+}
+
// NewClientResponseBodySize returns a new ClientResponseBodySize instrument.
func NewClientResponseBodySize(
m metric.Meter,
@@ -832,15 +880,18 @@ func NewClientResponseBodySize(
return ClientResponseBodySize{noop.Int64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientResponseBodySizeOpts
+ } else {
+ opt = append(opt, newClientResponseBodySizeOpts...)
+ }
+
i, err := m.Int64Histogram(
"http.client.response.body.size",
- append([]metric.Int64HistogramOption{
- metric.WithDescription("Size of HTTP client response bodies."),
- metric.WithUnit("By"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientResponseBodySize{noop.Int64Histogram{}}, err
+ return ClientResponseBodySize{noop.Int64Histogram{}}, err
}
return ClientResponseBodySize{i}, nil
}
@@ -927,6 +978,7 @@ func (m ClientResponseBodySize) Record(
func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -996,6 +1048,11 @@ type ServerActiveRequests struct {
metric.Int64UpDownCounter
}
+var newServerActiveRequestsOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP server requests."),
+ metric.WithUnit("{request}"),
+}
+
// NewServerActiveRequests returns a new ServerActiveRequests instrument.
func NewServerActiveRequests(
m metric.Meter,
@@ -1006,15 +1063,18 @@ func NewServerActiveRequests(
return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newServerActiveRequestsOpts
+ } else {
+ opt = append(opt, newServerActiveRequestsOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"http.server.active_requests",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("Number of active HTTP server requests."),
- metric.WithUnit("{request}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
+ return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
}
return ServerActiveRequests{i}, nil
}
@@ -1118,6 +1178,11 @@ type ServerRequestBodySize struct {
metric.Int64Histogram
}
+var newServerRequestBodySizeOpts = []metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server request bodies."),
+ metric.WithUnit("By"),
+}
+
// NewServerRequestBodySize returns a new ServerRequestBodySize instrument.
func NewServerRequestBodySize(
m metric.Meter,
@@ -1128,15 +1193,18 @@ func NewServerRequestBodySize(
return ServerRequestBodySize{noop.Int64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newServerRequestBodySizeOpts
+ } else {
+ opt = append(opt, newServerRequestBodySizeOpts...)
+ }
+
i, err := m.Int64Histogram(
"http.server.request.body.size",
- append([]metric.Int64HistogramOption{
- metric.WithDescription("Size of HTTP server request bodies."),
- metric.WithUnit("By"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ServerRequestBodySize{noop.Int64Histogram{}}, err
+ return ServerRequestBodySize{noop.Int64Histogram{}}, err
}
return ServerRequestBodySize{i}, nil
}
@@ -1220,6 +1288,7 @@ func (m ServerRequestBodySize) Record(
func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1299,6 +1368,11 @@ type ServerRequestDuration struct {
metric.Float64Histogram
}
+var newServerRequestDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP server requests."),
+ metric.WithUnit("s"),
+}
+
// NewServerRequestDuration returns a new ServerRequestDuration instrument.
func NewServerRequestDuration(
m metric.Meter,
@@ -1309,15 +1383,18 @@ func NewServerRequestDuration(
return ServerRequestDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newServerRequestDurationOpts
+ } else {
+ opt = append(opt, newServerRequestDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"http.server.request.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("Duration of HTTP server requests."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ServerRequestDuration{noop.Float64Histogram{}}, err
+ return ServerRequestDuration{noop.Float64Histogram{}}, err
}
return ServerRequestDuration{i}, nil
}
@@ -1387,6 +1464,7 @@ func (m ServerRequestDuration) Record(
func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1466,6 +1544,11 @@ type ServerResponseBodySize struct {
metric.Int64Histogram
}
+var newServerResponseBodySizeOpts = []metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server response bodies."),
+ metric.WithUnit("By"),
+}
+
// NewServerResponseBodySize returns a new ServerResponseBodySize instrument.
func NewServerResponseBodySize(
m metric.Meter,
@@ -1476,15 +1559,18 @@ func NewServerResponseBodySize(
return ServerResponseBodySize{noop.Int64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newServerResponseBodySizeOpts
+ } else {
+ opt = append(opt, newServerResponseBodySizeOpts...)
+ }
+
i, err := m.Int64Histogram(
"http.server.response.body.size",
- append([]metric.Int64HistogramOption{
- metric.WithDescription("Size of HTTP server response bodies."),
- metric.WithUnit("By"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ServerResponseBodySize{noop.Int64Histogram{}}, err
+ return ServerResponseBodySize{noop.Int64Histogram{}}, err
}
return ServerResponseBodySize{i}, nil
}
@@ -1568,6 +1654,7 @@ func (m ServerResponseBodySize) Record(
func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1638,4 +1725,4 @@ func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue {
// the category of synthetic traffic, such as tests or bots.
func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
return attribute.String("user_agent.synthetic.type", string(val))
-}
\ No newline at end of file
+}
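The hunks above regenerate each HTTP client and server instrument so that its default description and unit live in a package-level option slice, the defaults are appended to any caller options, and `RecordSet` returns early once it has recorded a value with an empty attribute set. A minimal usage sketch, assuming these hunks belong to the semconv v1.37.0 `httpconv` package (the sibling `otelconv` diff below uses that version) and that the constructors keep the `(metric.Meter, ...Option)` shape shown; the meter name is illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/semconv/v1.37.0/httpconv"
)

func main() {
	meter := otel.Meter("example") // illustrative instrumentation scope name

	// With no extra options the instrument picks up only the package-level
	// defaults: "Size of HTTP client request bodies." with unit "By".
	reqSize, err := httpconv.NewClientRequestBodySize(meter)
	if err != nil {
		panic(err)
	}

	// With an empty attribute.Set, RecordSet now records and returns
	// immediately instead of continuing into the pooled-option path.
	reqSize.RecordSet(context.Background(), 512, *attribute.EmptySet())
}
```

When options are supplied, the defaults are appended to them rather than rebuilt in a fresh slice on every constructor call.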
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
index a78eafd1f..fd064530c 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
@@ -3,7 +3,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Package httpconv provides types and functionality for OpenTelemetry semantic
+// Package otelconv provides types and functionality for OpenTelemetry semantic
// conventions in the "otel" namespace.
package otelconv
@@ -172,6 +172,11 @@ type SDKExporterLogExported struct {
metric.Int64Counter
}
+var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument.
func NewSDKExporterLogExported(
m metric.Meter,
@@ -182,15 +187,18 @@ func NewSDKExporterLogExported(
return SDKExporterLogExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterLogExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterLogExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.log.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterLogExported{noop.Int64Counter{}}, err
+ return SDKExporterLogExported{noop.Int64Counter{}}, err
}
return SDKExporterLogExported{i}, nil
}
@@ -319,6 +327,11 @@ type SDKExporterLogInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument.
func NewSDKExporterLogInflight(
m metric.Meter,
@@ -329,15 +342,18 @@ func NewSDKExporterLogInflight(
return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterLogInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterLogInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.log.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterLogInflight{i}, nil
}
@@ -449,6 +465,11 @@ type SDKExporterMetricDataPointExported struct {
metric.Int64Counter
}
+var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
+ metric.WithUnit("{data_point}"),
+}
+
// NewSDKExporterMetricDataPointExported returns a new
// SDKExporterMetricDataPointExported instrument.
func NewSDKExporterMetricDataPointExported(
@@ -460,15 +481,18 @@ func NewSDKExporterMetricDataPointExported(
return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterMetricDataPointExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterMetricDataPointExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.metric_data_point.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
- metric.WithUnit("{data_point}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
+ return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
}
return SDKExporterMetricDataPointExported{i}, nil
}
@@ -598,6 +622,11 @@ type SDKExporterMetricDataPointInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{data_point}"),
+}
+
// NewSDKExporterMetricDataPointInflight returns a new
// SDKExporterMetricDataPointInflight instrument.
func NewSDKExporterMetricDataPointInflight(
@@ -609,15 +638,18 @@ func NewSDKExporterMetricDataPointInflight(
return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterMetricDataPointInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterMetricDataPointInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.metric_data_point.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{data_point}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterMetricDataPointInflight{i}, nil
}
@@ -728,6 +760,11 @@ type SDKExporterOperationDuration struct {
metric.Float64Histogram
}
+var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("The duration of exporting a batch of telemetry records."),
+ metric.WithUnit("s"),
+}
+
// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration
// instrument.
func NewSDKExporterOperationDuration(
@@ -739,15 +776,18 @@ func NewSDKExporterOperationDuration(
return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterOperationDurationOpts
+ } else {
+ opt = append(opt, newSDKExporterOperationDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"otel.sdk.exporter.operation.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("The duration of exporting a batch of telemetry records."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
+ return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
}
return SDKExporterOperationDuration{i}, nil
}
@@ -825,6 +865,7 @@ func (m SDKExporterOperationDuration) Record(
func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -893,6 +934,11 @@ type SDKExporterSpanExported struct {
metric.Int64Counter
}
+var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument.
func NewSDKExporterSpanExported(
m metric.Meter,
@@ -903,15 +949,18 @@ func NewSDKExporterSpanExported(
return SDKExporterSpanExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterSpanExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterSpanExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.span.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterSpanExported{noop.Int64Counter{}}, err
+ return SDKExporterSpanExported{noop.Int64Counter{}}, err
}
return SDKExporterSpanExported{i}, nil
}
@@ -1040,6 +1089,11 @@ type SDKExporterSpanInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument.
func NewSDKExporterSpanInflight(
m metric.Meter,
@@ -1050,15 +1104,18 @@ func NewSDKExporterSpanInflight(
return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterSpanInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterSpanInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.span.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterSpanInflight{i}, nil
}
@@ -1169,6 +1226,11 @@ type SDKLogCreated struct {
metric.Int64Counter
}
+var newSDKLogCreatedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKLogCreated returns a new SDKLogCreated instrument.
func NewSDKLogCreated(
m metric.Meter,
@@ -1179,15 +1241,18 @@ func NewSDKLogCreated(
return SDKLogCreated{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKLogCreatedOpts
+ } else {
+ opt = append(opt, newSDKLogCreatedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.log.created",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKLogCreated{noop.Int64Counter{}}, err
+ return SDKLogCreated{noop.Int64Counter{}}, err
}
return SDKLogCreated{i}, nil
}
@@ -1254,6 +1319,11 @@ type SDKMetricReaderCollectionDuration struct {
metric.Float64Histogram
}
+var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the collect operation of the metric reader."),
+ metric.WithUnit("s"),
+}
+
// NewSDKMetricReaderCollectionDuration returns a new
// SDKMetricReaderCollectionDuration instrument.
func NewSDKMetricReaderCollectionDuration(
@@ -1265,15 +1335,18 @@ func NewSDKMetricReaderCollectionDuration(
return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKMetricReaderCollectionDurationOpts
+ } else {
+ opt = append(opt, newSDKMetricReaderCollectionDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"otel.sdk.metric_reader.collection.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("The duration of the collect operation of the metric reader."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
+ return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
}
return SDKMetricReaderCollectionDuration{i}, nil
}
@@ -1343,6 +1416,7 @@ func (m SDKMetricReaderCollectionDuration) Record(
func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1384,6 +1458,11 @@ type SDKProcessorLogProcessed struct {
metric.Int64Counter
}
+var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument.
func NewSDKProcessorLogProcessed(
m metric.Meter,
@@ -1394,15 +1473,18 @@ func NewSDKProcessorLogProcessed(
return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogProcessedOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogProcessedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.processor.log.processed",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
+ return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
}
return SDKProcessorLogProcessed{i}, nil
}
@@ -1515,6 +1597,11 @@ type SDKProcessorLogQueueCapacity struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity
// instrument.
func NewSDKProcessorLogQueueCapacity(
@@ -1526,15 +1613,18 @@ func NewSDKProcessorLogQueueCapacity(
return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogQueueCapacityOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogQueueCapacityOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.log.queue.capacity",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorLogQueueCapacity{i}, nil
}
@@ -1581,6 +1671,11 @@ type SDKProcessorLogQueueSize struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument.
func NewSDKProcessorLogQueueSize(
m metric.Meter,
@@ -1591,15 +1686,18 @@ func NewSDKProcessorLogQueueSize(
return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogQueueSizeOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogQueueSizeOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.log.queue.size",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorLogQueueSize{i}, nil
}
@@ -1646,6 +1744,11 @@ type SDKProcessorSpanProcessed struct {
metric.Int64Counter
}
+var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed
// instrument.
func NewSDKProcessorSpanProcessed(
@@ -1657,15 +1760,18 @@ func NewSDKProcessorSpanProcessed(
return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanProcessedOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanProcessedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.processor.span.processed",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
+ return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
}
return SDKProcessorSpanProcessed{i}, nil
}
@@ -1778,6 +1884,11 @@ type SDKProcessorSpanQueueCapacity struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity
// instrument.
func NewSDKProcessorSpanQueueCapacity(
@@ -1789,15 +1900,18 @@ func NewSDKProcessorSpanQueueCapacity(
return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanQueueCapacityOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.span.queue.capacity",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorSpanQueueCapacity{i}, nil
}
@@ -1844,6 +1958,11 @@ type SDKProcessorSpanQueueSize struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize
// instrument.
func NewSDKProcessorSpanQueueSize(
@@ -1855,15 +1974,18 @@ func NewSDKProcessorSpanQueueSize(
return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanQueueSizeOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanQueueSizeOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.span.queue.size",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorSpanQueueSize{i}, nil
}
@@ -1910,6 +2032,11 @@ type SDKSpanLive struct {
metric.Int64UpDownCounter
}
+var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKSpanLive returns a new SDKSpanLive instrument.
func NewSDKSpanLive(
m metric.Meter,
@@ -1920,15 +2047,18 @@ func NewSDKSpanLive(
return SDKSpanLive{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKSpanLiveOpts
+ } else {
+ opt = append(opt, newSDKSpanLiveOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.span.live",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKSpanLive{noop.Int64UpDownCounter{}}, err
+ return SDKSpanLive{noop.Int64UpDownCounter{}}, err
}
return SDKSpanLive{i}, nil
}
@@ -2013,6 +2143,11 @@ type SDKSpanStarted struct {
metric.Int64Counter
}
+var newSDKSpanStartedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of created spans."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKSpanStarted returns a new SDKSpanStarted instrument.
func NewSDKSpanStarted(
m metric.Meter,
@@ -2023,15 +2158,18 @@ func NewSDKSpanStarted(
return SDKSpanStarted{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKSpanStartedOpts
+ } else {
+ opt = append(opt, newSDKSpanStartedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.span.started",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of created spans."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKSpanStarted{noop.Int64Counter{}}, err
+ return SDKSpanStarted{noop.Int64Counter{}}, err
}
return SDKSpanStarted{i}, nil
}
@@ -2123,4 +2261,4 @@ func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.K
// value of the sampler for this span.
func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
return attribute.String("otel.span.sampling_result", string(val))
-}
\ No newline at end of file
+}
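The `otelconv` hunks apply the same option-slice refactor to the SDK self-observability instruments. Several of them wrap observable instruments, which are driven from a callback rather than recorded directly. A short sketch, assuming the `(metric.Meter, ...Option)` constructor shape shown above; the meter name and observed value are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
)

func main() {
	meter := otel.Meter("example") // illustrative instrumentation scope name

	// Created with the package-level defaults only (description and "{span}").
	queueSize, err := otelconv.NewSDKProcessorSpanQueueSize(meter)
	if err != nil {
		panic(err)
	}

	// The wrapper embeds metric.Int64ObservableUpDownCounter, so it can be
	// registered and observed like any other observable instrument.
	_, err = meter.RegisterCallback(func(_ context.Context, o metric.Observer) error {
		o.ObserveInt64(queueSize.Int64ObservableUpDownCounter, 0) // report the current queue depth here
		return nil
	}, queueSize.Int64ObservableUpDownCounter)
	if err != nil {
		panic(err)
	}
}
```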
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/config.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/config.go
index aea11a2b5..d9ecef1ca 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -4,6 +4,7 @@
package trace // import "go.opentelemetry.io/otel/trace"
import (
+ "slices"
"time"
"go.opentelemetry.io/otel/attribute"
@@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption {
})
}
-// WithInstrumentationAttributes sets the instrumentation attributes.
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+ // NewMergeIterator uses the first value for any duplicates.
+ iter := attribute.NewMergeIterator(&b, &a)
+ merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for iter.Next() {
+ merged = append(merged, iter.Attribute())
+ }
+ return attribute.NewSet(merged...)
+}
+
+// WithInstrumentationAttributes adds the instrumentation attributes.
//
-// The passed attributes will be de-duplicated.
+// This is equivalent to calling [WithInstrumentationAttributeSet] with an
+// [attribute.Set] created from a clone of the passed attributes.
+// [WithInstrumentationAttributeSet] is recommended for more control.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+ set := attribute.NewSet(slices.Clone(attr)...)
+ return WithInstrumentationAttributeSet(set)
+}
+
+// WithInstrumentationAttributeSet adds the instrumentation attributes.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
+func WithInstrumentationAttributeSet(set attribute.Set) TracerOption {
+ if set.Len() == 0 {
+ return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+ return config
+ })
+ }
+
return tracerOptionFunc(func(config TracerConfig) TracerConfig {
- config.attrs = attribute.NewSet(attr...)
+ if config.attrs.Len() == 0 {
+ config.attrs = set
+ } else {
+ config.attrs = mergeSets(config.attrs, set)
+ }
return config
})
}
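The new doc comments spell out the merge semantics for instrumentation attributes: options accumulate in the order they are passed, and the last value wins for duplicate keys. A small sketch of that behaviour using the `WithInstrumentationAttributeSet` option added here; the tracer name and attribute keys are illustrative:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
	"go.opentelemetry.io/otel/trace/noop"
)

func main() {
	set := attribute.NewSet(
		attribute.String("deployment.environment", "prod"),
		attribute.String("service.team", "obs"),
	)

	cfg := trace.NewTracerConfig(
		trace.WithInstrumentationAttributes(attribute.String("deployment.environment", "dev")),
		trace.WithInstrumentationAttributeSet(set), // later option: its value wins on the duplicate key
	)

	attrs := cfg.InstrumentationAttributes()
	v, _ := attrs.Value("deployment.environment")
	fmt.Println(v.AsString()) // "prod"

	// The same options are accepted when obtaining a Tracer.
	_ = noop.NewTracerProvider().Tracer("example",
		trace.WithInstrumentationAttributeSet(set),
	)
}
```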
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/span.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/span.go
index d3aa476ee..d01e79366 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/span.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -66,6 +66,10 @@ type Span interface {
// SetAttributes sets kv as attributes of the Span. If a key from kv
// already exists for an attribute of the Span it will be overwritten with
// the value contained in kv.
+ //
+ // Note that adding attributes at span creation using [WithAttributes] is preferred
+ // to calling SetAttribute later, as samplers can only consider information
+ // already present during span creation.
SetAttributes(kv ...attribute.KeyValue)
// TracerProvider returns a TracerProvider that can be used to generate
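The added doc note recommends supplying attributes at span creation through `WithAttributes`, since samplers only see what is present when the span starts. A brief sketch of that guidance; the tracer name and attributes are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := otel.Tracer("example") // illustrative instrumentation scope name

	// Preferred: a sampler can take http.route into account when deciding.
	ctx, span := tracer.Start(context.Background(), "handle-request",
		trace.WithAttributes(attribute.String("http.route", "/items/{id}")),
	)
	defer span.End()
	_ = ctx

	// Still valid, but too late to influence the sampling decision.
	span.SetAttributes(attribute.Int("http.response.status_code", 200))
}
```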
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/version.go b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/version.go
index bcaa5aa53..0d5b02918 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/version.go
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.38.0"
+ return "1.39.0"
}
diff --git a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/versions.yaml b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/versions.yaml
index 07145e254..f4a3893eb 100644
--- a/openshift/tests-extension/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/openshift/tests-extension/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
module-sets:
stable-v1:
- version: v1.38.0
+ version: v1.39.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -22,11 +22,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.60.0
+ version: v0.61.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.14.0
+ version: v0.15.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/log/logtest
@@ -36,9 +36,28 @@ module-sets:
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.13
+ version: v0.0.14
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
- go.opentelemetry.io/otel/trace/internal/telemetry/test
+modules:
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/prometheus:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp:
+ version-refs:
+ - ./internal/version.go
diff --git a/openshift/tests-extension/vendor/modules.txt b/openshift/tests-extension/vendor/modules.txt
index 794381c8e..8ecd5e21b 100644
--- a/openshift/tests-extension/vendor/modules.txt
+++ b/openshift/tests-extension/vendor/modules.txt
@@ -46,10 +46,10 @@ github.com/go-logr/logr/funcr
# github.com/go-logr/stdr v1.2.2
## explicit; go 1.16
github.com/go-logr/stdr
-# github.com/go-openapi/jsonpointer v0.22.3
+# github.com/go-openapi/jsonpointer v0.22.4
## explicit; go 1.24.0
github.com/go-openapi/jsonpointer
-# github.com/go-openapi/jsonreference v0.21.3
+# github.com/go-openapi/jsonreference v0.21.4
## explicit; go 1.24.0
github.com/go-openapi/jsonreference
github.com/go-openapi/jsonreference/internal
@@ -347,11 +347,12 @@ go.opentelemetry.io/auto/sdk/internal/telemetry
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
-# go.opentelemetry.io/otel v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel v1.39.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/attribute/internal
+go.opentelemetry.io/otel/attribute/internal/xxhash
go.opentelemetry.io/otel/baggage
go.opentelemetry.io/otel/codes
go.opentelemetry.io/otel/internal/baggage
@@ -372,8 +373,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/metric v1.39.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
@@ -386,8 +387,8 @@ go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
go.opentelemetry.io/otel/sdk/trace/internal/x
-# go.opentelemetry.io/otel/trace v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/trace v1.39.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/internal/telemetry
diff --git a/requirements.txt b/requirements.txt
index 19dcbbfd5..25d91056c 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -30,6 +30,6 @@ readtime==3.0.0
regex==2026.1.15
requests==2.32.5
six==1.17.0
-soupsieve==2.8.1
+soupsieve==2.8.3
urllib3==2.6.3
watchdog==6.0.0
diff --git a/vendor/github.com/go-openapi/jsonpointer/.cliff.toml b/vendor/github.com/go-openapi/jsonpointer/.cliff.toml
index ae70028b7..702629f5d 100644
--- a/vendor/github.com/go-openapi/jsonpointer/.cliff.toml
+++ b/vendor/github.com/go-openapi/jsonpointer/.cliff.toml
@@ -78,7 +78,7 @@ body = """
### People who contributed to this release
{% endif %}
{%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %}
- {%- if contributor.username != "dependabot[bot]" %}
+ {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
* [@{{ contributor.username }}](https://github.com/{{ contributor.username }})
{%- endif %}
{%- endfor %}
@@ -91,7 +91,7 @@ body = """
{%- endif %}
{%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
- {%- if contributor.username != "dependabot[bot]" %}
+ {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
* @{{ contributor.username }} made their first contribution
{%- if contributor.pr_number %}
in [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
diff --git a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md
index aace4fcfb..03c098316 100644
--- a/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md
+++ b/vendor/github.com/go-openapi/jsonpointer/CONTRIBUTORS.md
@@ -4,11 +4,11 @@
| Total Contributors | Total Contributions |
| --- | --- |
-| 12 | 90 |
+| 12 | 95 |
| Username | All Time Contribution Count | All Commits |
| --- | --- | --- |
-| @fredbi | 43 | https://github.com/go-openapi/jsonpointer/commits?author=fredbi |
+| @fredbi | 48 | https://github.com/go-openapi/jsonpointer/commits?author=fredbi |
| @casualjim | 33 | https://github.com/go-openapi/jsonpointer/commits?author=casualjim |
| @magodo | 3 | https://github.com/go-openapi/jsonpointer/commits?author=magodo |
| @youyuanwu | 3 | https://github.com/go-openapi/jsonpointer/commits?author=youyuanwu |
diff --git a/vendor/github.com/go-openapi/jsonpointer/README.md b/vendor/github.com/go-openapi/jsonpointer/README.md
index 00cbfd741..b61b63fd9 100644
--- a/vendor/github.com/go-openapi/jsonpointer/README.md
+++ b/vendor/github.com/go-openapi/jsonpointer/README.md
@@ -8,8 +8,7 @@
[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url]
-
-[![GoDoc][godoc-badge]][godoc-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge]
+[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge]
---
@@ -104,6 +103,15 @@ using the special trailing character "-" is not implemented.
* [Maintainers documentation](docs/MAINTAINERS.md)
* [Code style](docs/STYLE.md)
+## Cutting a new release
+
+Maintainers can cut a new release by either:
+
+* running [this workflow](https://github.com/go-openapi/jsonpointer/actions/workflows/bump-release.yml)
+* or pushing a semver tag
+ * signed tags are preferred
+ * The tag message is prepended to release notes
+
[test-badge]: https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml/badge.svg
[test-url]: https://github.com/go-openapi/jsonpointer/actions/workflows/go-test.yml
@@ -114,8 +122,10 @@ using the special trailing character "-" is not implemented.
[codeql-badge]: https://github.com/go-openapi/jsonpointer/actions/workflows/codeql.yml/badge.svg
[codeql-url]: https://github.com/go-openapi/jsonpointer/actions/workflows/codeql.yml
-[release-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer.svg
-[release-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer
+[release-badge]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer.svg
+[release-url]: https://badge.fury.io/gh/go-openapi%2Fjsonpointer
+[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer.svg
+[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonpointer
[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/jsonpointer
[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/jsonpointer
@@ -126,8 +136,9 @@ using the special trailing character "-" is not implemented.
[doc-url]: https://goswagger.io/go-openapi
[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/jsonpointer
[godoc-url]: http://pkg.go.dev/github.com/go-openapi/jsonpointer
-[slack-badge]: https://slackin.goswagger.io/badge.svg
-[slack-url]: https://slackin.goswagger.io
+[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png
+[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YM
+[slack-url]: https://goswagger.slack.com/archives/C04R30YMU
[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg
[license-url]: https://github.com/go-openapi/jsonpointer/?tab=Apache-2.0-1-ov-file#readme
diff --git a/vendor/github.com/go-openapi/jsonreference/.cliff.toml b/vendor/github.com/go-openapi/jsonreference/.cliff.toml
new file mode 100644
index 000000000..702629f5d
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/.cliff.toml
@@ -0,0 +1,181 @@
+# git-cliff ~ configuration file
+# https://git-cliff.org/docs/configuration
+
+[changelog]
+header = """
+"""
+
+footer = """
+
+-----
+
+**[{{ remote.github.repo }}]({{ self::remote_url() }}) license terms**
+
+[![License][license-badge]][license-url]
+
+[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg
+[license-url]: {{ self::remote_url() }}/?tab=Apache-2.0-1-ov-file#readme
+
+{%- macro remote_url() -%}
+ https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
+{%- endmacro -%}
+"""
+
+body = """
+{%- if version %}
+## [{{ version | trim_start_matches(pat="v") }}]({{ self::remote_url() }}/tree/{{ version }}) - {{ timestamp | date(format="%Y-%m-%d") }}
+{%- else %}
+## [unreleased]
+{%- endif %}
+{%- if message %}
+ {%- raw %}\n{% endraw %}
+{{ message }}
+ {%- raw %}\n{% endraw %}
+{%- endif %}
+{%- if version %}
+ {%- if previous.version %}
+
+**Full Changelog**: <{{ self::remote_url() }}/compare/{{ previous.version }}...{{ version }}>
+ {%- endif %}
+{%- else %}
+ {%- raw %}\n{% endraw %}
+{%- endif %}
+
+{%- if statistics %}{% if statistics.commit_count %}
+ {%- raw %}\n{% endraw %}
+{{ statistics.commit_count }} commits in this release.
+ {%- raw %}\n{% endraw %}
+{%- endif %}{% endif %}
+-----
+
+{%- for group, commits in commits | group_by(attribute="group") %}
+ {%- raw %}\n{% endraw %}
+### {{ group | upper_first }}
+ {%- raw %}\n{% endraw %}
+ {%- for commit in commits %}
+ {%- if commit.remote.pr_title %}
+ {%- set commit_message = commit.remote.pr_title %}
+ {%- else %}
+ {%- set commit_message = commit.message %}
+ {%- endif %}
+* {{ commit_message | split(pat="\n") | first | trim }}
+ {%- if commit.remote.username %}
+{%- raw %} {% endraw %}by [@{{ commit.remote.username }}](https://github.com/{{ commit.remote.username }})
+ {%- endif %}
+ {%- if commit.remote.pr_number %}
+{%- raw %} {% endraw %}in [#{{ commit.remote.pr_number }}]({{ self::remote_url() }}/pull/{{ commit.remote.pr_number }})
+ {%- endif %}
+{%- raw %} {% endraw %}[...]({{ self::remote_url() }}/commit/{{ commit.id }})
+ {%- endfor %}
+{%- endfor %}
+
+{%- if github %}
+{%- raw %}\n{% endraw -%}
+ {%- set all_contributors = github.contributors | length %}
+ {%- if github.contributors | filter(attribute="username", value="dependabot[bot]") | length < all_contributors %}
+-----
+
+### People who contributed to this release
+ {% endif %}
+ {%- for contributor in github.contributors | filter(attribute="username") | sort(attribute="username") %}
+ {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
+* [@{{ contributor.username }}](https://github.com/{{ contributor.username }})
+ {%- endif %}
+ {%- endfor %}
+
+ {% if github.contributors | filter(attribute="is_first_time", value=true) | length != 0 %}
+-----
+ {%- raw %}\n{% endraw %}
+
+### New Contributors
+ {%- endif %}
+
+ {%- for contributor in github.contributors | filter(attribute="is_first_time", value=true) %}
+ {%- if contributor.username != "dependabot[bot]" and contributor.username != "github-actions[bot]" %}
+* @{{ contributor.username }} made their first contribution
+ {%- if contributor.pr_number %}
+ in [#{{ contributor.pr_number }}]({{ self::remote_url() }}/pull/{{ contributor.pr_number }}) \
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+{%- endif %}
+
+{%- raw %}\n{% endraw %}
+
+{%- macro remote_url() -%}
+ https://github.com/{{ remote.github.owner }}/{{ remote.github.repo }}
+{%- endmacro -%}
+"""
+# Remove leading and trailing whitespaces from the changelog's body.
+trim = true
+# Render body even when there are no releases to process.
+render_always = true
+# An array of regex based postprocessors to modify the changelog.
+postprocessors = [
+ # Replace the placeholder with a URL.
+ #{ pattern = '', replace = "https://github.com/orhun/git-cliff" },
+]
+# output file path
+# output = "test.md"
+
+[git]
+# Parse commits according to the conventional commits specification.
+# See https://www.conventionalcommits.org
+conventional_commits = false
+# Exclude commits that do not match the conventional commits specification.
+filter_unconventional = false
+# Require all commits to be conventional.
+# Takes precedence over filter_unconventional.
+require_conventional = false
+# Split commits on newlines, treating each line as an individual commit.
+split_commits = false
+# An array of regex based parsers to modify commit messages prior to further processing.
+commit_preprocessors = [
+ # Replace issue numbers with link templates to be updated in `changelog.postprocessors`.
+ #{ pattern = '\((\w+\s)?#([0-9]+)\)', replace = "([#${2}](/issues/${2}))"},
+ # Check spelling of the commit message using https://github.com/crate-ci/typos.
+ # If the spelling is incorrect, it will be fixed automatically.
+ #{ pattern = '.*', replace_command = 'typos --write-changes -' }
+]
+# Prevent commits that are breaking from being excluded by commit parsers.
+protect_breaking_commits = false
+# An array of regex based parsers for extracting data from the commit message.
+# Assigns commits to groups.
+# Optionally sets the commit's scope and can decide to exclude commits from further processing.
+commit_parsers = [
+ { message = "^[Cc]hore\\([Rr]elease\\): prepare for", skip = true },
+ { message = "(^[Mm]erge)|([Mm]erge conflict)", skip = true },
+ { field = "author.name", pattern = "dependabot*", group = "Updates" },
+ { message = "([Ss]ecurity)|([Vv]uln)", group = "Security" },
+ { body = "(.*[Ss]ecurity)|([Vv]uln)", group = "Security" },
+ { message = "([Cc]hore\\(lint\\))|(style)|(lint)|(codeql)|(golangci)", group = "Code quality" },
+ { message = "(^[Dd]oc)|((?i)readme)|(badge)|(typo)|(documentation)", group = "Documentation" },
+ { message = "(^[Ff]eat)|(^[Ee]nhancement)", group = "Implemented enhancements" },
+ { message = "(^ci)|(\\(ci\\))|(fixup\\s+ci)|(fix\\s+ci)|(license)|(example)", group = "Miscellaneous tasks" },
+ { message = "^test", group = "Testing" },
+ { message = "(^fix)|(panic)", group = "Fixed bugs" },
+ { message = "(^refact)|(rework)", group = "Refactor" },
+ { message = "(^[Pp]erf)|(performance)", group = "Performance" },
+ { message = "(^[Cc]hore)", group = "Miscellaneous tasks" },
+ { message = "^[Rr]evert", group = "Reverted changes" },
+ { message = "(upgrade.*?go)|(go\\s+version)", group = "Updates" },
+ { message = ".*", group = "Other" },
+]
+# Exclude commits that are not matched by any commit parser.
+filter_commits = false
+# An array of link parsers for extracting external references, and turning them into URLs, using regex.
+link_parsers = []
+# Include only the tags that belong to the current branch.
+use_branch_tags = false
+# Order releases topologically instead of chronologically.
+topo_order = false
+# Order releases topologically instead of chronologically.
+topo_order_commits = true
+# Order of commits in each group/release within the changelog.
+# Allowed values: newest, oldest
+sort_commits = "newest"
+# Process submodules commits
+recurse_submodules = false
+
+#[remote.github]
+#owner = "go-openapi"
diff --git a/vendor/github.com/go-openapi/jsonreference/.editorconfig b/vendor/github.com/go-openapi/jsonreference/.editorconfig
new file mode 100644
index 000000000..3152da69a
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/.editorconfig
@@ -0,0 +1,26 @@
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+indent_style = space
+indent_size = 2
+trim_trailing_whitespace = true
+
+# Set default charset
+[*.{js,py,go,scala,rb,java,html,css,less,sass,md}]
+charset = utf-8
+
+# Tab indentation (no size specified)
+[*.go]
+indent_style = tab
+
+[*.md]
+trim_trailing_whitespace = false
+
+# Matches the exact files either package.json or .travis.yml
+[{package.json,.travis.yml}]
+indent_style = space
+indent_size = 2
diff --git a/vendor/github.com/go-openapi/jsonreference/.golangci.yml b/vendor/github.com/go-openapi/jsonreference/.golangci.yml
index 7cea1af8b..fdae591bc 100644
--- a/vendor/github.com/go-openapi/jsonreference/.golangci.yml
+++ b/vendor/github.com/go-openapi/jsonreference/.golangci.yml
@@ -2,34 +2,17 @@ version: "2"
linters:
default: all
disable:
- - cyclop
- depguard
- - errchkjson
- - errorlint
- - exhaustruct
- - forcetypeassert
- funlen
- - gochecknoglobals
- - gochecknoinits
- - gocognit
- - godot
- godox
- - gosmopolitan
- - inamedparam
- #- intrange # disabled while < go1.22
- - ireturn
- - lll
- - musttag
- - nestif
+ - exhaustruct
- nlreturn
- nonamedreturns
- noinlineerr
- paralleltest
- recvcheck
- testpackage
- - thelper
- tparallel
- - unparam
- varnamelen
- whitespace
- wrapcheck
@@ -41,8 +24,15 @@ linters:
goconst:
min-len: 2
min-occurrences: 3
+ cyclop:
+ max-complexity: 20
gocyclo:
- min-complexity: 45
+ min-complexity: 20
+ exhaustive:
+ default-signifies-exhaustive: true
+ default-case-required: true
+ lll:
+ line-length: 180
exclusions:
generated: lax
presets:
@@ -58,6 +48,7 @@ formatters:
enable:
- gofmt
- goimports
+ - gofumpt
exclusions:
generated: lax
paths:
diff --git a/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md b/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md
new file mode 100644
index 000000000..9907d5d21
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/CONTRIBUTORS.md
@@ -0,0 +1,21 @@
+# Contributors
+
+- Repository: ['go-openapi/jsonreference']
+
+| Total Contributors | Total Contributions |
+| --- | --- |
+| 9 | 68 |
+
+| Username | All Time Contribution Count | All Commits |
+| --- | --- | --- |
+| @fredbi | 31 | https://github.com/go-openapi/jsonreference/commits?author=fredbi |
+| @casualjim | 25 | https://github.com/go-openapi/jsonreference/commits?author=casualjim |
+| @youyuanwu | 5 | https://github.com/go-openapi/jsonreference/commits?author=youyuanwu |
+| @olivierlemasle | 2 | https://github.com/go-openapi/jsonreference/commits?author=olivierlemasle |
+| @apelisse | 1 | https://github.com/go-openapi/jsonreference/commits?author=apelisse |
+| @gbjk | 1 | https://github.com/go-openapi/jsonreference/commits?author=gbjk |
+| @honza | 1 | https://github.com/go-openapi/jsonreference/commits?author=honza |
+| @Neo2308 | 1 | https://github.com/go-openapi/jsonreference/commits?author=Neo2308 |
+| @erraggy | 1 | https://github.com/go-openapi/jsonreference/commits?author=erraggy |
+
+ _this file was generated by the [Contributors GitHub Action](https://github.com/github/contributors)_
diff --git a/vendor/github.com/go-openapi/jsonreference/NOTICE b/vendor/github.com/go-openapi/jsonreference/NOTICE
index f9ad7e0f7..f3b51939a 100644
--- a/vendor/github.com/go-openapi/jsonreference/NOTICE
+++ b/vendor/github.com/go-openapi/jsonreference/NOTICE
@@ -8,12 +8,15 @@ by the go-swagger and go-openapi maintainers ("go-swagger maintainers").
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this software except in compliance with the License.
+
You may obtain a copy of the License at
+ http://www.apache.org/licenses/LICENSE-2.0.
+
This software is copied from, derived from, and inspired by other original software products.
It ships with copies of other software which license terms are recalled below.
-The original sofware was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com).
+The original software was authored on 25-02-2013 by sigu-399 (https://github.com/sigu-399, sigu.399@gmail.com).
github.com/sigh-399/jsonpointer
===========================
diff --git a/vendor/github.com/go-openapi/jsonreference/README.md b/vendor/github.com/go-openapi/jsonreference/README.md
index 2274a4b78..d479dbdc7 100644
--- a/vendor/github.com/go-openapi/jsonreference/README.md
+++ b/vendor/github.com/go-openapi/jsonreference/README.md
@@ -1,18 +1,39 @@
-# gojsonreference [![Build Status](https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg)](https://github.com/go-openapi/jsonreference/actions?query=workflow%3A"go+test") [![codecov](https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/jsonreference)
+# jsonreference
-[![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io)
-[![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/jsonreference/master/LICENSE)
-[![Go Reference](https://pkg.go.dev/badge/github.com/go-openapi/jsonreference)](https://pkg.go.dev/github.com/go-openapi/jsonreference)
-[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/jsonreference)](https://goreportcard.com/report/github.com/go-openapi/jsonreference)
+
+[![Tests][test-badge]][test-url] [![Coverage][cov-badge]][cov-url] [![CI vuln scan][vuln-scan-badge]][vuln-scan-url] [![CodeQL][codeql-badge]][codeql-url]
+
+
+
+[![Release][release-badge]][release-url] [![Go Report Card][gocard-badge]][gocard-url] [![CodeFactor Grade][codefactor-badge]][codefactor-url] [![License][license-badge]][license-url]
+
+
+[![GoDoc][godoc-badge]][godoc-url] [![Slack Channel][slack-logo]![slack-badge]][slack-url] [![go version][goversion-badge]][goversion-url] ![Top language][top-badge] ![Commits since latest release][commits-badge]
-An implementation of JSON Reference - Go language
+---
+
+An implementation of JSON Reference for golang.
## Status
-Feature complete. Stable API
+
+API is stable.
+
+## Import this library in your project
+
+```cmd
+go get github.com/go-openapi/jsonreference
+```
## Dependencies
+
* https://github.com/go-openapi/jsonpointer
+## Basic usage
+
+## Change log
+
+See
+
## References
* http://tools.ietf.org/html/draft-ietf-appsawg-json-pointer-07
@@ -24,3 +45,55 @@ This library ships under the [SPDX-License-Identifier: Apache-2.0](./LICENSE).
See the license [NOTICE](./NOTICE), which recalls the licensing terms of all the pieces of software
on top of which it has been built.
+
+## Other documentation
+
+* [All-time contributors](./CONTRIBUTORS.md)
+* [Contributing guidelines](.github/CONTRIBUTING.md)
+* [Maintainers documentation](docs/MAINTAINERS.md)
+* [Code style](docs/STYLE.md)
+
+## Cutting a new release
+
+Maintainers can cut a new release by either:
+
+* running [this workflow](https://github.com/go-openapi/jsonreference/actions/workflows/bump-release.yml)
+* or pushing a semver tag
+ * signed tags are preferred
+ * The tag message is prepended to release notes
+
+
+[test-badge]: https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml/badge.svg
+[test-url]: https://github.com/go-openapi/jsonreference/actions/workflows/go-test.yml
+[cov-badge]: https://codecov.io/gh/go-openapi/jsonreference/branch/master/graph/badge.svg
+[cov-url]: https://codecov.io/gh/go-openapi/jsonreference
+[vuln-scan-badge]: https://github.com/go-openapi/jsonreference/actions/workflows/scanner.yml/badge.svg
+[vuln-scan-url]: https://github.com/go-openapi/jsonreference/actions/workflows/scanner.yml
+[codeql-badge]: https://github.com/go-openapi/jsonreference/actions/workflows/codeql.yml/badge.svg
+[codeql-url]: https://github.com/go-openapi/jsonreference/actions/workflows/codeql.yml
+
+[release-badge]: https://badge.fury.io/gh/go-openapi%2Fjsonreference.svg
+[release-url]: https://badge.fury.io/gh/go-openapi%2Fjsonreference
+[gomod-badge]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonreference.svg
+[gomod-url]: https://badge.fury.io/go/github.com%2Fgo-openapi%2Fjsonreference
+
+[gocard-badge]: https://goreportcard.com/badge/github.com/go-openapi/jsonreference
+[gocard-url]: https://goreportcard.com/report/github.com/go-openapi/jsonreference
+[codefactor-badge]: https://img.shields.io/codefactor/grade/github/go-openapi/jsonreference
+[codefactor-url]: https://www.codefactor.io/repository/github/go-openapi/jsonreference
+
+[doc-badge]: https://img.shields.io/badge/doc-site-blue?link=https%3A%2F%2Fgoswagger.io%2Fgo-openapi%2F
+[doc-url]: https://goswagger.io/go-openapi
+[godoc-badge]: https://pkg.go.dev/badge/github.com/go-openapi/jsonreference
+[godoc-url]: http://pkg.go.dev/github.com/go-openapi/jsonreference
+[slack-logo]: https://a.slack-edge.com/e6a93c1/img/icons/favicon-32.png
+[slack-badge]: https://img.shields.io/badge/slack-blue?link=https%3A%2F%2Fgoswagger.slack.com%2Farchives%2FC04R30YMU
+[slack-url]: https://goswagger.slack.com/archives/C04R30YMU
+
+[license-badge]: http://img.shields.io/badge/license-Apache%20v2-orange.svg
+[license-url]: https://github.com/go-openapi/jsonreference/?tab=Apache-2.0-1-ov-file#readme
+
+[goversion-badge]: https://img.shields.io/github/go-mod/go-version/go-openapi/jsonreference
+[goversion-url]: https://github.com/go-openapi/jsonreference/blob/master/go.mod
+[top-badge]: https://img.shields.io/github/languages/top/go-openapi/jsonreference
+[commits-badge]: https://img.shields.io/github/commits-since/go-openapi/jsonreference/latest
diff --git a/vendor/github.com/go-openapi/jsonreference/SECURITY.md b/vendor/github.com/go-openapi/jsonreference/SECURITY.md
new file mode 100644
index 000000000..2a7b6f091
--- /dev/null
+++ b/vendor/github.com/go-openapi/jsonreference/SECURITY.md
@@ -0,0 +1,19 @@
+# Security Policy
+
+This policy outlines the commitment and practices of the go-openapi maintainers regarding security.
+
+## Supported Versions
+
+| Version | Supported |
+| ------- | ------------------ |
+| 0.22.x | :white_check_mark: |
+
+## Reporting a vulnerability
+
+If you become aware of a security vulnerability that affects the current repository,
+please report it privately to the maintainers.
+
+Please follow the instructions provided by GitHub to
+[Privately report a security vulnerability](https://docs.github.com/en/code-security/security-advisories/guidance-on-reporting-and-writing-information-about-vulnerabilities/privately-reporting-a-security-vulnerability#privately-reporting-a-security-vulnerability).
+
+TL;DR: on GitHub, navigate to the project's "Security" tab, then click "Report a vulnerability".
diff --git a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
index ca79391dc..a08b47320 100644
--- a/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
+++ b/vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
@@ -14,9 +14,11 @@ const (
defaultHTTPSPort = ":443"
)
-// Regular expressions used by the normalizations
-var rxPort = regexp.MustCompile(`(:\d+)/?$`)
-var rxDupSlashes = regexp.MustCompile(`/{2,}`)
+// Regular expressions used by the normalizations.
+var (
+ rxPort = regexp.MustCompile(`(:\d+)/?$`)
+ rxDupSlashes = regexp.MustCompile(`/{2,}`)
+)
// NormalizeURL will normalize the specified URL
// This was added to replace a previous call to the no longer maintained purell library:
diff --git a/vendor/github.com/go-openapi/jsonreference/reference.go b/vendor/github.com/go-openapi/jsonreference/reference.go
index 33d4798ca..6e3ae4995 100644
--- a/vendor/github.com/go-openapi/jsonreference/reference.go
+++ b/vendor/github.com/go-openapi/jsonreference/reference.go
@@ -18,7 +18,7 @@ const (
var ErrChildURL = errors.New("child url is nil")
-// Ref represents a json reference object
+// Ref represents a json reference object.
type Ref struct {
referenceURL *url.URL
referencePointer jsonpointer.Pointer
@@ -30,7 +30,7 @@ type Ref struct {
HasFullFilePath bool
}
-// New creates a new reference for the given string
+// New creates a new reference for the given string.
func New(jsonReferenceString string) (Ref, error) {
var r Ref
err := r.parse(jsonReferenceString)
@@ -38,7 +38,7 @@ func New(jsonReferenceString string) (Ref, error) {
}
// MustCreateRef parses the ref string and panics when it's invalid.
-// Use the New method for a version that returns an error
+// Use the New method for a version that returns an error.
func MustCreateRef(ref string) Ref {
r, err := New(ref)
if err != nil {
@@ -48,17 +48,17 @@ func MustCreateRef(ref string) Ref {
return r
}
-// GetURL gets the URL for this reference
+// GetURL gets the URL for this reference.
func (r *Ref) GetURL() *url.URL {
return r.referenceURL
}
-// GetPointer gets the json pointer for this reference
+// GetPointer gets the json pointer for this reference.
func (r *Ref) GetPointer() *jsonpointer.Pointer {
return &r.referencePointer
}
-// String returns the best version of the url for this reference
+// String returns the best version of the url for this reference.
func (r *Ref) String() string {
if r.referenceURL != nil {
return r.referenceURL.String()
@@ -71,7 +71,7 @@ func (r *Ref) String() string {
return r.referencePointer.String()
}
-// IsRoot returns true if this reference is a root document
+// IsRoot returns true if this reference is a root document.
func (r *Ref) IsRoot() bool {
return r.referenceURL != nil &&
!r.IsCanonical() &&
@@ -79,13 +79,13 @@ func (r *Ref) IsRoot() bool {
r.referenceURL.Fragment == ""
}
-// IsCanonical returns true when this pointer starts with http(s):// or file://
+// IsCanonical returns true when this pointer starts with http(s):// or file://.
func (r *Ref) IsCanonical() bool {
return (r.HasFileScheme && r.HasFullFilePath) || (!r.HasFileScheme && r.HasFullURL)
}
// Inherits creates a new reference from a parent and a child
-// If the child cannot inherit from the parent, an error is returned
+// If the child cannot inherit from the parent, an error is returned.
func (r *Ref) Inherits(child Ref) (*Ref, error) {
childURL := child.GetURL()
parentURL := r.GetURL()
@@ -103,7 +103,7 @@ func (r *Ref) Inherits(child Ref) (*Ref, error) {
return &ref, nil
}
-// "Constructor", parses the given string JSON reference
+// "Constructor", parses the given string JSON reference.
func (r *Ref) parse(jsonReferenceString string) error {
parsed, err := url.Parse(jsonReferenceString)
if err != nil {
diff --git a/vendor/go.opentelemetry.io/otel/.codespellignore b/vendor/go.opentelemetry.io/otel/.codespellignore
index 2b53a25e1..a6d0cbcc9 100644
--- a/vendor/go.opentelemetry.io/otel/.codespellignore
+++ b/vendor/go.opentelemetry.io/otel/.codespellignore
@@ -8,3 +8,4 @@ nam
valu
thirdparty
addOpt
+observ
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
index b01762ffc..1b1b2aff9 100644
--- a/vendor/go.opentelemetry.io/otel/.golangci.yml
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -197,6 +197,9 @@ linters:
- float-compare
- go-require
- require-error
+ usetesting:
+ context-background: true
+ context-todo: true
exclusions:
generated: lax
presets:
diff --git a/vendor/go.opentelemetry.io/otel/.lycheeignore b/vendor/go.opentelemetry.io/otel/.lycheeignore
index 532850588..994b677df 100644
--- a/vendor/go.opentelemetry.io/otel/.lycheeignore
+++ b/vendor/go.opentelemetry.io/otel/.lycheeignore
@@ -1,4 +1,5 @@
http://localhost
+https://localhost
http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects
@@ -6,4 +7,7 @@ https://github.com/open-telemetry/opentelemetry-go/projects
https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
-http://4.3.2.1:78/user/123
\ No newline at end of file
+http://4.3.2.1:78/user/123
+file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317
+# URL works, but it has blocked link checkers.
+https://dl.acm.org/doi/10.1145/198429.198435
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
index f3abcfdc2..ecbe0582c 100644
--- a/vendor/go.opentelemetry.io/otel/CHANGELOG.md
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -11,6 +11,74 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
+## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05
+
+### Added
+
+- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175)
+- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages.
+ This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287)
+- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`.
+ Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353)
+- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434)
+- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7459)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486)
+- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374)
+- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512)
+- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524)
+- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571)
+- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608)
+- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`.
+ All `Processor` implementations now include an `Enabled` method. (#7639)
+- The `go.opentelemetry.io/otel/semconv/v1.38.0` package.
+ The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions.
+  See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7648)
+
+### Changed
+
+- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set.
+ Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/metric` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266)
+- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266)
+- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302)
+- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306)
+- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`.
+ ⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded.
+ Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default.
+ To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363)
+- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371)
+- The default `TranslationStrategy` in `go.opentelemetry.io/otel/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421)
+- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7427)
+- Include W3C TraceFlags (bits 0–7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7438)
+- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types.
+ If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442)
+
+### Fixed
+
+- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them.
+ Attributes with duplicate keys will use the last value passed. (#7300)
+- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372)
+- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372)
+- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403)
+- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655)
+- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656)
+
+### Removed
+
+- Drop support for [Go 1.23]. (#7274)
+- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`.
+ The `Enabled` method has been added to the `Processor` interface instead.
+ All `Processor` implementations must now implement the `Enabled` method.
+ Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639)
+
## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
This release is the last to support [Go 1.23].
@@ -3430,8 +3498,10 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project.
-[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.38.0...HEAD
+[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.39.0...HEAD
+[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0
[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
+[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
index 0b3ae855c..ff5e1f76e 100644
--- a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
+++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md
@@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel
(This may print some warning about "build constraints exclude all Go
files", just ignore it.)
-This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
-can alternatively use `git` directly with:
+This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`.
+Alternatively, you can use `git` directly with:
```sh
git clone https://github.com/open-telemetry/opentelemetry-go
@@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go
that name is a kind of a redirector to GitHub that `go get` can
understand, but `git` does not.)
-This would put the project in the `opentelemetry-go` directory in
-current working directory.
+This will place the project in the `opentelemetry-go` directory within the current working directory.
Enter the newly created directory and add your fork as a new remote:
@@ -109,7 +108,7 @@ A PR is considered **ready to merge** when:
This is not enforced through automation, but needs to be validated by the
maintainer merging.
- * At least one of the qualified approvals need to be from an
+ * At least one of the qualified approvals needs to be from an
[Approver]/[Maintainer] affiliated with a different company than the author
of the PR.
* PRs introducing changes that have already been discussed and consensus
@@ -166,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
### Focus on Capabilities, Not Structure Compliance
OpenTelemetry is an evolving specification, one where the desires and
-use cases are clear, but the method to satisfy those uses cases are
+use cases are clear, but the methods to satisfy those use cases are
not.
As such, Contributions should provide functionality and behavior that
-conforms to the specification, but the interface and structure is
+conforms to the specification, but the interface and structure are
flexible.
It is preferable to have contributions follow the idioms of the
@@ -217,7 +216,7 @@ about dependency compatibility.
This project does not partition dependencies based on the environment (i.e.
`development`, `staging`, `production`).
-Only the dependencies explicitly included in the released modules have be
+Only the dependencies explicitly included in the released modules have been
tested and verified to work with the released code. No other guarantee is made
about the compatibility of other dependencies.
@@ -635,8 +634,8 @@ is not in their root name.
The use of internal packages should be scoped to a single module. A sub-module
should never import from a parent internal package. This creates a coupling
-between the two modules where a user can upgrade the parent without the child
-and if the internal package API has changed it will fail to upgrade[^3].
+between the two modules where a user can upgrade the parent without the child,
+and if the internal package API has changed, it will fail to upgrade[^3].
There are two known exceptions to this rule:
@@ -657,7 +656,7 @@ this.
### Ignoring context cancellation
-OpenTelemetry API implementations need to ignore the cancellation of the context that are
+OpenTelemetry API implementations need to ignore the cancellation of the context that is
passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
Recording methods should not return an error describing the cancellation state of the context
when they complete, nor should they abort any work.
@@ -675,6 +674,441 @@ force flushing telemetry, shutting down a signal provider) the context cancellat
should be honored. This means all work done on behalf of the user provided context
should be canceled.
+### Observability
+
+OpenTelemetry Go SDK components should be instrumented to give users observability into the health and performance of the telemetry pipeline itself.
+This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications.
+
+This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components.
+
+#### Environment Variable Activation
+
+Observability features are currently experimental.
+They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable.
+This follows the established experimental feature pattern used throughout the SDK.
+
+Components should check for this environment variable using a consistent pattern:
+
+```go
+import "go.opentelemetry.io/otel/*/internal/x"
+
+if x.Observability.Enabled() {
+ // Initialize observability metrics
+}
+```
+
+**References**:
+
+- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go)
+- [sdk](./sdk/internal/x/x.go)
+
+#### Encapsulation
+
+Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`).
+It should not be mixed into the instrumented component.
+
+Prefer this:
+
+```go
+type SDKComponent struct {
+ inst *instrumentation
+}
+
+type instrumentation struct {
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+```
+
+To this:
+
+```go
+// ❌ Avoid this pattern.
+type SDKComponent struct {
+ /* other SDKComponent fields... */
+
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+```
+
+The instrumentation code should not bloat the code being instrumented.
+In practice, this usually means giving the instrumentation its own file, or its own package if it is complex or reused.
+
+#### Initialization
+
+Instrumentation setup should be explicit, side-effect free, and local to the relevant component.
+Avoid relying on global or implicit [side effects][side-effect] for initialization.
+
+Encapsulate setup in constructor functions, ensuring clear ownership and scope:
+
+```go
+import (
+ "errors"
+
+ semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
+ "go.opentelemetry.io/otel/semconv/v1.37.0/otelconv"
+)
+
+type SDKComponent struct {
+ inst *instrumentation
+}
+
+func NewSDKComponent(config Config) (*SDKComponent, error) {
+ inst, err := newInstrumentation()
+ if err != nil {
+ return nil, err
+ }
+ return &SDKComponent{inst: inst}, nil
+}
+
+type instrumentation struct {
+ inflight otelconv.SDKComponentInflight
+ exported otelconv.SDKComponentExported
+}
+
+func newInstrumentation() (*instrumentation, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ meter := otel.GetMeterProvider().Meter(
+ "",
+ metric.WithInstrumentationVersion(sdk.Version()),
+ metric.WithSchemaURL(semconv.SchemaURL),
+ )
+
+ inst := &instrumentation{}
+
+ var err, e error
+ inst.inflight, e = otelconv.NewSDKComponentInflight(meter)
+ err = errors.Join(err, e)
+
+ inst.exported, e = otelconv.NewSDKComponentExported(meter)
+ err = errors.Join(err, e)
+
+ return inst, err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (c *Component) initObservability() {
+ // Initialize observability metrics
+ if !x.Observability.Enabled() {
+ return
+ }
+
+ // Initialize observability metrics
+ c.inst = &instrumentation{/* ... */}
+}
+```
+
+[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science)
+
+#### Performance
+
+When observability is disabled, there should be little to no overhead.
+
+```go
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ if e.inst != nil {
+ attrs := expensiveOperation()
+ e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
+ }
+ // Export spans...
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ attrs := expensiveOperation()
+ e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
+ // Export spans...
+}
+
+func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) {
+ if i == nil || i.inflight == nil {
+ return
+ }
+ i.inflight.Add(ctx, count, metric.WithAttributes(attrs...))
+}
+```
+
+When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead.
+
+##### Attribute and Option Allocation Management
+
+Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes.
+
+```go
+var (
+ attrPool = sync.Pool{
+ New: func() any {
+ // Pre-allocate common capacity
+ knownCap := 8 // Adjust based on expected usage
+ s := make([]attribute.KeyValue, 0, knownCap)
+ // Return a pointer to avoid extra allocation on Put().
+ return &s
+ },
+ }
+
+ addOptPool = &sync.Pool{
+ New: func() any {
+ const n = 1 // WithAttributeSet
+ o := make([]metric.AddOption, 0, n)
+ // Return a pointer to avoid extra allocation on Put().
+ return &o
+ },
+ }
+)
+
+func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) {
+ attrs := attrPool.Get().(*[]attribute.KeyValue)
+ defer func() {
+ *attrs = (*attrs)[:0] // Reset.
+ attrPool.Put(attrs)
+ }()
+
+ *attrs = append(*attrs, baseAttrs...)
+ // Add any dynamic attributes.
+ *attrs = append(*attrs, semconv.OTelComponentName("exporter-1"))
+
+ addOpt := addOptPool.Get().(*[]metric.AddOption)
+ defer func() {
+ *addOpt = (*addOpt)[:0]
+ addOptPool.Put(addOpt)
+ }()
+
+ set := attribute.NewSet(*attrs...)
+ *addOpt = append(*addOpt, metric.WithAttributeSet(set))
+
+ i.counter.Add(ctx, value, *addOpt...)
+}
+```
+
+Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used.
+This amortizes the cost of allocation and synchronization.
+Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness.
+
+[`sync.Pool`]: https://pkg.go.dev/sync#Pool
+
+##### Cache common attribute sets for repeated measurements
+
+If a static set of attributes is used for measurements and it is known at compile time, pre-compute and cache these attributes.
+
+```go
+type spanLiveSetKey struct {
+ sampled bool
+}
+
+var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
+ {true}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordAndSample,
+ ),
+ ),
+ {false}: attribute.NewSet(
+ otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
+ otelconv.SpanSamplingResultRecordOnly,
+ ),
+ ),
+}
+
+func spanLiveSet(sampled bool) attribute.Set {
+ key := spanLiveSetKey{sampled: sampled}
+ return spanLiveSetCache[key]
+}
+```
+
+##### Benchmarking
+
+Always provide benchmarks when introducing or refactoring instrumentation.
+Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios:
+
+```go
+func BenchmarkExportSpans(b *testing.B) {
+ scenarios := []struct {
+ name string
+ obsEnabled bool
+ }{
+ {"ObsDisabled", false},
+ {"ObsEnabled", true},
+ }
+
+ for _, scenario := range scenarios {
+ b.Run(scenario.name, func(b *testing.B) {
+ b.Setenv(
+ "OTEL_GO_X_OBSERVABILITY",
+ strconv.FormatBool(scenario.obsEnabled),
+ )
+
+ exporter := NewExporter()
+ spans := generateTestSpans(100)
+
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ _ = exporter.ExportSpans(context.Background(), spans)
+ }
+ })
+ }
+}
+```
+
+#### Error Handling and Robustness
+
+Errors should be reported back to the caller when possible, and partial failures should be handled gracefully.
+
+```go
+func newInstrumentation() (*instrumentation, error) {
+ if !x.Observability.Enabled() {
+ return nil, nil
+ }
+
+ m := otel.GetMeterProvider().Meter(/* initialize meter */)
+ counter, err := otelconv.NewSDKComponentCounter(m)
+ // Use the partially initialized counter if available.
+ i := &instrumentation{counter: counter}
+ // Return any error to the caller.
+ return i, err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func newInstrumentation() *instrumentation {
+ if !x.Observability.Enabled() {
+		return nil
+ }
+
+ m := otel.GetMeterProvider().Meter(/* initialize meter */)
+ counter, err := otelconv.NewSDKComponentCounter(m)
+ if err != nil {
+ // ❌ Do not dump the error to the OTel Handler. Return it to the
+ // caller.
+ otel.Handle(err)
+ // ❌ Do not return nil if we can still use the partially initialized
+ // counter.
+ return nil
+ }
+ return &instrumentation{counter: counter}
+}
+```
+
+If the instrumented component cannot report the error to the user, let it report the error to `otel.Handle`.
+
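+For example, a minimal sketch of this fallback (`flushLoop` and `recordFlush` are hypothetical names, not part of the SDK):
+
+```go
+// flushLoop runs in a background goroutine. It has no caller to return
+// errors to, so instrumentation errors go to the global error handler.
+func (c *SDKComponent) flushLoop(ctx context.Context) {
+	if err := c.inst.recordFlush(ctx); err != nil {
+		otel.Handle(err)
+	}
+}
+```
+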
+#### Context Propagation
+
+Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context:
+
+```go
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ // Use the provided context for observability measurements
+ e.inst.recordSpanExportStarted(ctx, len(spans))
+
+ err := e.doExport(ctx, spans)
+
+ if err != nil {
+ e.inst.recordSpanExportFailed(ctx, len(spans), err)
+ } else {
+ e.inst.recordSpanExportSucceeded(ctx, len(spans))
+ }
+
+ return err
+}
+```
+
+```go
+// ❌ Avoid this pattern.
+func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
+ // ❌ Do not break the context propagation.
+ e.inst.recordSpanExportStarted(context.Background(), len(spans))
+
+ err := e.doExport(ctx, spans)
+
+ /* ... */
+
+ return err
+}
+```
+
+#### Semantic Conventions Compliance
+
+All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md).
+
+Use the metric semantic conventions convenience package [otelconv](./semconv/v1.37.0/otelconv/metric.go).
+
+##### Component Identification
+
+Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes).
+
+If a component is not a well-known type specified in the semantic conventions, use the type name qualified by its package path as a stable identifier.
+
+```go
+componentType := "go.opentelemetry.io/otel/sdk/trace.Span"
+```
+
+```go
+// ❌ Do not do this.
+componentType := "trace-span"
+```
+
+The component name should be a stable unique identifier for the specific instance of the component.
+
+Use a global counter to ensure uniqueness if necessary.
+
+```go
+// Unique 0-based ID counter for component instances.
+var componentIDCounter atomic.Int64
+
+// nextID returns the next unique ID for a component.
+func nextID() int64 {
+ return componentIDCounter.Add(1) - 1
+}
+
+// componentName returns a unique name for the component instance.
+func componentName() attribute.KeyValue {
+ id := nextID()
+ name := fmt.Sprintf("%s/%d", componentType, id)
+ return semconv.OTelComponentName(name)
+}
+```
+
+The component ID will need to be resettable for deterministic testing.
+If tests are in a different package than the component being tested (i.e. a `_test` package name), use a generated `counter` internal package to manage the counter.
+See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference.
+
+#### Testing
+
+Use deterministic testing with isolated state:
+
+```go
+func TestObservability(t *testing.T) {
+ // Restore state after test to ensure this does not affect other tests.
+ prev := otel.GetMeterProvider()
+ t.Cleanup(func() { otel.SetMeterProvider(prev) })
+
+ // Isolate the meter provider for deterministic testing
+ reader := metric.NewManualReader()
+ meterProvider := metric.NewMeterProvider(metric.WithReader(reader))
+ otel.SetMeterProvider(meterProvider)
+
+ // Use t.Setenv to ensure environment variable is restored after test.
+ t.Setenv("OTEL_GO_X_OBSERVABILITY", "true")
+
+ // Reset component ID counter to ensure deterministic component names.
+ componentIDCounter.Store(0)
+
+ /* ... test code ... */
+}
+```
+
+Test order should not affect results.
+Ensure that any global state (e.g. component ID counters) is reset between tests.
+
## Approvers and Maintainers
### Maintainers
@@ -696,7 +1130,6 @@ For more information about the approver role, see the [community repository](htt
### Triagers
- [Alex Kats](https://github.com/akats7), Capital One
-- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
@@ -704,6 +1137,7 @@ For more information about the triager role, see the [community repository](http
- [Aaron Clawson](https://github.com/MadVikingGod)
- [Anthony Mirabella](https://github.com/Aneurysm9)
+- [Cheng-Zhen Yang](https://github.com/scorpionknifes)
- [Chester Cheung](https://github.com/hanyuancheung)
- [Evan Torrie](https://github.com/evantorrie)
- [Gustavo Silva Paiva](https://github.com/paivagustavo)
diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile
index bc0f1f92d..44870248c 100644
--- a/vendor/go.opentelemetry.io/otel/Makefile
+++ b/vendor/go.opentelemetry.io/otel/Makefile
@@ -146,11 +146,12 @@ build-tests/%:
# Tests
-TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe
+TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz
.PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short
+test-fuzz: ARGS=-fuzztime=10s -fuzz
test-verbose: ARGS=-v -race
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
test-concurrent-safe: TIMEOUT=120
diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md
index 6b7ab5f21..c63359543 100644
--- a/vendor/go.opentelemetry.io/otel/README.md
+++ b/vendor/go.opentelemetry.io/otel/README.md
@@ -55,25 +55,18 @@ Currently, this project supports the following environments.
|----------|------------|--------------|
| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 |
-| Ubuntu | 1.23 | amd64 |
| Ubuntu | 1.25 | 386 |
| Ubuntu | 1.24 | 386 |
-| Ubuntu | 1.23 | 386 |
| Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.24 | arm64 |
-| Ubuntu | 1.23 | arm64 |
-| macOS 13 | 1.25 | amd64 |
-| macOS 13 | 1.24 | amd64 |
-| macOS 13 | 1.23 | amd64 |
+| macOS | 1.25 | amd64 |
+| macOS | 1.24 | amd64 |
| macOS | 1.25 | arm64 |
| macOS | 1.24 | arm64 |
-| macOS | 1.23 | arm64 |
| Windows | 1.25 | amd64 |
| Windows | 1.24 | amd64 |
-| Windows | 1.23 | amd64 |
| Windows | 1.25 | 386 |
| Windows | 1.24 | 386 |
-| Windows | 1.23 | 386 |
While this project should work for other systems, no compatibility guarantees
are made for those systems currently.
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
index 1ddcdef03..861756fd7 100644
--- a/vendor/go.opentelemetry.io/otel/RELEASING.md
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -24,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit
## Breaking changes validation
-You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API.
+You can run `make gorelease`, which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API.
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
@@ -62,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
```
3. Update the [Changelog](./CHANGELOG.md).
- - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+ - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand.
To verify this, you can look directly at the commits since the ``.
```
@@ -107,34 +107,50 @@ It is critical you make sure the version you push upstream is correct.
...
```
-## Release
+## Sign artifacts
-Finally create a Release for the new `` on GitHub.
-The release body should include all the release notes from the Changelog for this release.
+To ensure we comply with CNCF best practices, we need to sign the release artifacts.
-### Sign the Release Artifact
+Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag.
+Both archives need to be signed with your GPG key.
-To ensure we comply with CNCF best practices, we need to sign the release artifact.
-The tarball attached to the GitHub release needs to be signed with your GPG key.
+You can use [this script] to verify the contents of the archives before signing them.
-Follow [these steps] to sign the release artifact and upload it to GitHub.
-You can use [this script] to verify the contents of the tarball before signing it.
+To find your GPG key ID, run:
-Be sure to use the correct GPG key when signing the release artifact.
+```terminal
+gpg --list-secret-keys --keyid-format=long
+```
+
+The key ID is the 16-character string after `sec rsa4096/` (or similar).
+
+Set environment variables and sign both artifacts:
```terminal
-gpg --local-user --armor --detach-sign opentelemetry-go-.tar.gz
+export VERSION="" # e.g., v1.32.0
+export KEY_ID=""
+
+gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz
+gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip
```
-You can verify the signature with:
+You can verify the signatures with:
```terminal
-gpg --verify opentelemetry-go-.tar.gz.asc opentelemetry-go-.tar.gz
+gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz
+gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip
```
-[these steps]: https://wiki.debian.org/Creating%20signed%20GitHub%20releases
[this script]: https://github.com/MrAlias/attest-sh
+## Release
+
+Finally, create a Release for the new `` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+***IMPORTANT***: GitHub Releases are immutable once created.
+You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later.
+
## Post-Release
### Contrib Repository
@@ -160,14 +176,6 @@ This helps track what changes were included in each release.
Once all related issues and PRs have been added to the milestone, close the milestone.
-### Demo Repository
-
-Bump the dependencies in the following Go services:
-
-- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting)
-- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout)
-- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
-
### Close the `Version Release` issue
Once the todo list in the `Version Release` issue is complete, close the issue.
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
index b8cb605c1..b27c9e84f 100644
--- a/vendor/go.opentelemetry.io/otel/VERSIONING.md
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -83,7 +83,7 @@ is designed so the following goals can be achieved.
in either the module path or the import path.
* In addition to public APIs, telemetry produced by stable instrumentation
will remain stable and backwards compatible. This is to avoid breaking
- alerts and dashboard.
+ alerts and dashboards.
* Modules will be used to encapsulate instrumentation, detectors, exporters,
propagators, and any other independent sets of related components.
* Experimental modules still under active development will be versioned at
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
index 6333d34b3..6cc1a1655 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/encoder.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -16,7 +16,7 @@ type (
// set into a wire representation.
Encoder interface {
// Encode returns the serialized encoding of the attribute set using
- // its Iterator. This result may be cached by a attribute.Set.
+ // its Iterator. This result may be cached by an attribute.Set.
Encode(iterator Iterator) string
// ID returns a value that is unique for each class of attribute
diff --git a/vendor/go.opentelemetry.io/otel/attribute/hash.go b/vendor/go.opentelemetry.io/otel/attribute/hash.go
new file mode 100644
index 000000000..6aa69aeae
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/hash.go
@@ -0,0 +1,92 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+ "fmt"
+ "reflect"
+
+ "go.opentelemetry.io/otel/attribute/internal/xxhash"
+)
+
+// Type identifiers. These identifiers are hashed before the value of the
+// corresponding type. This is done to distinguish values that are hashed with
+// the same value representation (e.g. `int64(1)` and `true`, []int64{0} and
+// int64(0)).
+//
+// These are all 8-byte strings converted to a uint64 representation. A
+// uint64 is used instead of the string directly as an optimization: it avoids
+// the for loop in [xxhash], which adds minor overhead.
+const (
+ boolID uint64 = 7953749933313450591 // "_boolean" (little endian)
+ int64ID uint64 = 7592915492740740150 // "64_bit_i" (little endian)
+ float64ID uint64 = 7376742710626956342 // "64_bit_f" (little endian)
+ stringID uint64 = 6874584755375207263 // "_string_" (little endian)
+ boolSliceID uint64 = 6875993255270243167 // "_[]bool_" (little endian)
+ int64SliceID uint64 = 3762322556277578591 // "_[]int64" (little endian)
+ float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian)
+ stringSliceID uint64 = 7453010373645655387 // "[]string" (little endian)
+)
+
+// hashKVs returns a new xxHash64 hash of kvs.
+func hashKVs(kvs []KeyValue) uint64 {
+ h := xxhash.New()
+ for _, kv := range kvs {
+ h = hashKV(h, kv)
+ }
+ return h.Sum64()
+}
+
+// hashKV returns the xxHash64 hash of kv with h as the base.
+func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash {
+ h = h.String(string(kv.Key))
+
+ switch kv.Value.Type() {
+ case BOOL:
+ h = h.Uint64(boolID)
+ h = h.Uint64(kv.Value.numeric)
+ case INT64:
+ h = h.Uint64(int64ID)
+ h = h.Uint64(kv.Value.numeric)
+ case FLOAT64:
+ h = h.Uint64(float64ID)
+ // Assumes numeric stored with math.Float64bits.
+ h = h.Uint64(kv.Value.numeric)
+ case STRING:
+ h = h.Uint64(stringID)
+ h = h.String(kv.Value.stringly)
+ case BOOLSLICE:
+ h = h.Uint64(boolSliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Bool(rv.Index(i).Bool())
+ }
+ case INT64SLICE:
+ h = h.Uint64(int64SliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Int64(rv.Index(i).Int())
+ }
+ case FLOAT64SLICE:
+ h = h.Uint64(float64SliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.Float64(rv.Index(i).Float())
+ }
+ case STRINGSLICE:
+ h = h.Uint64(stringSliceID)
+ rv := reflect.ValueOf(kv.Value.slice)
+ for i := 0; i < rv.Len(); i++ {
+ h = h.String(rv.Index(i).String())
+ }
+ case INVALID:
+ default:
+ // Logging is an alternative, but using the internal logger here
+ // causes an import cycle so it is not done.
+ v := kv.Value.AsInterface()
+ msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v)
+ panic(msg)
+ }
+ return h
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go
new file mode 100644
index 000000000..113a97838
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/internal/xxhash/xxhash.go
@@ -0,0 +1,64 @@
+// Copyright The OpenTelemetry Authors
+// SPDX-License-Identifier: Apache-2.0
+
+// Package xxhash provides a wrapper around the xxhash library for attribute hashing.
+package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash"
+
+import (
+ "encoding/binary"
+ "math"
+
+ "github.com/cespare/xxhash/v2"
+)
+
+// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute values.
+type Hash struct {
+ d *xxhash.Digest
+}
+
+// New returns a new initialized xxHash64 hasher.
+func New() Hash {
+ return Hash{d: xxhash.New()}
+}
+
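+// Uint64 hashes the little-endian byte encoding of val and returns the updated hash.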
+func (h Hash) Uint64(val uint64) Hash {
+ var buf [8]byte
+ binary.LittleEndian.PutUint64(buf[:], val)
+	// Write on an xxhash.Digest never returns an error. If one is somehow
+	// returned, panic.
+ _, err := h.d.Write(buf[:])
+ if err != nil {
+ panic("xxhash write of uint64 failed: " + err.Error())
+ }
+ return h
+}
+
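+// Bool hashes val as a uint64 (1 for true, 0 for false) and returns the updated hash.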
+func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function.
+ if val {
+ return h.Uint64(1)
+ }
+ return h.Uint64(0)
+}
+
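+// Float64 hashes the IEEE 754 bit pattern of val and returns the updated hash.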
+func (h Hash) Float64(val float64) Hash {
+ return h.Uint64(math.Float64bits(val))
+}
+
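+// Int64 hashes val reinterpreted as a uint64 and returns the updated hash.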
+func (h Hash) Int64(val int64) Hash {
+ return h.Uint64(uint64(val)) // nolint:gosec // Overflow doesn't matter since we are hashing.
+}
+
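+// String hashes the bytes of val and returns the updated hash.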
+func (h Hash) String(val string) Hash {
+	// WriteString on an xxhash.Digest never returns an error. If one is
+	// somehow returned, panic.
+ _, err := h.d.WriteString(val)
+ if err != nil {
+ panic("xxhash write of string failed: " + err.Error())
+ }
+ return h
+}
+
+// Sum64 returns the current hash value.
+func (h Hash) Sum64() uint64 {
+ return h.d.Sum64()
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go
index 64735d382..911d557ee 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/set.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/set.go
@@ -9,6 +9,8 @@ import (
"reflect"
"slices"
"sort"
+
+ "go.opentelemetry.io/otel/attribute/internal/xxhash"
)
type (
@@ -23,19 +25,19 @@ type (
// the Equals method to ensure stable equivalence checking.
//
// Users should also use the Distinct returned from Equivalent as a map key
- // instead of a Set directly. In addition to that type providing guarantees
- // on stable equivalence, it may also provide performance improvements.
+ // instead of a Set directly. Set has relatively poor performance when used
+ // as a map key compared to Distinct.
Set struct {
- equivalent Distinct
+ hash uint64
+ data any
}
- // Distinct is a unique identifier of a Set.
+ // Distinct is an identifier of a Set which is very likely to be unique.
//
- // Distinct is designed to ensure equivalence stability: comparisons will
- // return the same value across versions. For this reason, Distinct should
- // always be used as a map key instead of a Set.
+	// Distinct should be used as a map key instead of a Set to provide better
+	// performance for map operations.
Distinct struct {
- iface any
+ hash uint64
}
// Sortable implements sort.Interface, used for sorting KeyValue.
@@ -46,15 +48,34 @@ type (
Sortable []KeyValue
)
+// Compile-time check that these types remain comparable.
+var (
+ _ = isComparable(Set{})
+ _ = isComparable(Distinct{})
+)
+
+func isComparable[T comparable](t T) T { return t }
+
var (
// keyValueType is used in computeDistinctReflect.
keyValueType = reflect.TypeOf(KeyValue{})
- // emptySet is returned for empty attribute sets.
- emptySet = &Set{
- equivalent: Distinct{
- iface: [0]KeyValue{},
- },
+ // emptyHash is the hash of an empty set.
+ emptyHash = xxhash.New().Sum64()
+
+ // userDefinedEmptySet is an empty set. It was mistakenly exposed to users
+ // as something they can assign to, so it must remain addressable and
+ // mutable.
+ //
+ // This is kept for backwards compatibility, but should not be used in new code.
+ userDefinedEmptySet = &Set{
+ hash: emptyHash,
+ data: [0]KeyValue{},
+ }
+
+ emptySet = Set{
+ hash: emptyHash,
+ data: [0]KeyValue{},
}
)
@@ -62,33 +83,35 @@ var (
//
// This is a convenience provided for optimized calling utility.
func EmptySet() *Set {
- return emptySet
-}
-
-// reflectValue abbreviates reflect.ValueOf(d).
-func (d Distinct) reflectValue() reflect.Value {
- return reflect.ValueOf(d.iface)
+ // Continue to return the pointer to the user-defined empty set for
+ // backwards-compatibility.
+ //
+ // New code should not use this, instead use emptySet.
+ return userDefinedEmptySet
}
// Valid reports whether this value refers to a valid Set.
-func (d Distinct) Valid() bool {
- return d.iface != nil
+func (d Distinct) Valid() bool { return d.hash != 0 }
+
+// reflectValue abbreviates reflect.ValueOf(d).
+func (l Set) reflectValue() reflect.Value {
+ return reflect.ValueOf(l.data)
}
// Len returns the number of attributes in this set.
func (l *Set) Len() int {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return 0
}
- return l.equivalent.reflectValue().Len()
+ return l.reflectValue().Len()
}
// Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return KeyValue{}, false
}
- value := l.equivalent.reflectValue()
+ value := l.reflectValue()
if idx >= 0 && idx < value.Len() {
// Note: The Go compiler successfully avoids an allocation for
@@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) {
// Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) {
- if l == nil || !l.equivalent.Valid() {
+ if l == nil || l.hash == 0 {
return Value{}, false
}
- rValue := l.equivalent.reflectValue()
+ rValue := l.reflectValue()
vlen := rValue.Len()
idx := sort.Search(vlen, func(idx int) bool {
@@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue {
return iter.ToSlice()
}
-// Equivalent returns a value that may be used as a map key. The Distinct type
-// guarantees that the result will equal the equivalent. Distinct value of any
+// Equivalent returns a value that may be used as a map key. Equal Distinct
+// values are very likely to identify equivalent attribute Sets. The result
+// equals the Distinct value of any
// attribute set with the same elements as this, where sets are made unique by
// choosing the last value in the input for any given key.
func (l *Set) Equivalent() Distinct {
- if l == nil || !l.equivalent.Valid() {
- return emptySet.equivalent
+ if l == nil || l.hash == 0 {
+ return Distinct{hash: emptySet.hash}
}
- return l.equivalent
+ return Distinct{hash: l.hash}
}
// Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
- return l.Equivalent() == o.Equivalent()
+ if l.Equivalent() != o.Equivalent() {
+ return false
+ }
+ if l == nil || l.hash == 0 {
+ l = &emptySet
+ }
+ if o == nil || o.hash == 0 {
+ o = &emptySet
+ }
+ return l.data == o.data
}
// Encoded returns the encoded form of this set, according to encoder.
@@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string {
return encoder.Encode(l.Iter())
}
-func empty() Set {
- return Set{
- equivalent: emptySet.equivalent,
- }
-}
-
// NewSet returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details.
//
@@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// Check for empty set.
if len(kvs) == 0 {
- return empty(), nil
+ return emptySet, nil
}
// Stable sort so the following de-duplication can implement
@@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if filter != nil {
if div := filteredToFront(kvs, filter); div != 0 {
- return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
+ return newSet(kvs[div:]), kvs[:div]
}
}
- return Set{equivalent: computeDistinct(kvs)}, nil
+ return newSet(kvs), nil
}
// NewSetWithSortableFiltered returns a new Set.
@@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if first == 0 {
// It is safe to assume len(slice) >= 1 given we found at least one
// attribute above that needs to be filtered out.
- return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
+ return newSet(slice[1:]), slice[:1]
}
// Move the filtered slice[first] to the front (preserving order).
@@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
// Do not re-evaluate re(slice[first+1:]).
div := filteredToFront(slice[1:first+1], re) + 1
- return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
+ return newSet(slice[div:]), slice[:div]
}
-// computeDistinct returns a Distinct using either the fixed- or
-// reflect-oriented code path, depending on the size of the input. The input
-// slice is assumed to already be sorted and de-duplicated.
-func computeDistinct(kvs []KeyValue) Distinct {
- iface := computeDistinctFixed(kvs)
- if iface == nil {
- iface = computeDistinctReflect(kvs)
+// newSet returns a new Set based on the sorted and de-duplicated kvs.
+func newSet(kvs []KeyValue) Set {
+ s := Set{
+ hash: hashKVs(kvs),
+ data: computeDataFixed(kvs),
}
- return Distinct{
- iface: iface,
+ if s.data == nil {
+ s.data = computeDataReflect(kvs)
}
+ return s
}
-// computeDistinctFixed computes a Distinct for small slices. It returns nil
-// if the input is too large for this code path.
-func computeDistinctFixed(kvs []KeyValue) any {
+// computeDataFixed computes the Set data for small slices. It returns nil if the
+// input is too large for this code path.
+func computeDataFixed(kvs []KeyValue) any {
switch len(kvs) {
case 1:
return [1]KeyValue(kvs)
@@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) any {
}
}
-// computeDistinctReflect computes a Distinct using reflection, works for any
-// size input.
-func computeDistinctReflect(kvs []KeyValue) any {
+// computeDataReflect computes the Set data using reflection. It works for
+// input of any size.
+func computeDataReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@@ -383,7 +408,7 @@ func computeDistinctReflect(kvs []KeyValue) any {
// MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) {
- return json.Marshal(l.equivalent.iface)
+ return json.Marshal(l.data)
}
// MarshalLog is the marshaling function used by the logging system to represent this Set.
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
index e584b2477..24f1fa37d 100644
--- a/vendor/go.opentelemetry.io/otel/attribute/type_string.go
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
func (i Type) String() string {
- if i < 0 || i >= Type(len(_Type_index)-1) {
+ idx := int(i) - 0
+ if i < 0 || idx >= len(_Type_index)-1 {
return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
}
- return _Type_name[_Type_index[i]:_Type_index[i+1]]
+ return _Type_name[_Type_index[idx]:_Type_index[idx+1]]
}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
index f83a448ec..78e98c4c0 100644
--- a/vendor/go.opentelemetry.io/otel/baggage/baggage.go
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// If we couldn't find any valid key character,
// it means the key is either empty or invalid.
if keyStart == keyEnd {
- return
+ return p, ok
}
// Skip spaces after the key: " key< >= value ".
@@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// A key can have no value, like: " key ".
ok = true
p.key = s[keyStart:keyEnd]
- return
+ return p, ok
}
// If we have not reached the end and we can't find the '=' delimiter,
// it means the property is invalid.
if s[index] != keyValueDelimiter[0] {
- return
+ return p, ok
}
// Attempting to parse the value.
@@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// we have not reached the end, it means the property is
// invalid, something like: " key = value value1".
if index != len(s) {
- return
+ return p, ok
}
// Decode a percent-encoded value.
rawVal := s[valueStart:valueEnd]
unescapeVal, err := url.PathUnescape(rawVal)
if err != nil {
- return
+ return p, ok
}
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
@@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
p.hasValue = true
p.value = value
- return
+ return p, ok
}
func skipSpace(s string, offset int) int {
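The baggage change above only swaps bare returns for explicit `return p, ok` on the named results, so each exit point states what it yields. A tiny sketch of the same style on a hypothetical parser:

package main

import (
	"fmt"
	"strings"
)

// splitKV parses "key=value"; ok reports whether the input was well formed.
// Returning the named results explicitly keeps every early exit readable.
func splitKV(s string) (k, v string, ok bool) {
	i := strings.IndexByte(s, '=')
	if i < 0 {
		return k, v, ok // zero values: "", "", false
	}
	k, v, ok = s[:i], s[i+1:], true
	return k, v, ok
}

func main() {
	fmt.Println(splitKV("color=red"))
	fmt.Println(splitKV("malformed"))
}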
diff --git a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
index a311fbb48..cadb87cc0 100644
--- a/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
+++ b/vendor/go.opentelemetry.io/otel/dependencies.Dockerfile
@@ -1,4 +1,4 @@
# This is a renovate-friendly source of Docker images.
FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
-FROM otel/weaver:v0.17.1@sha256:32523b5e44fb44418786347e9f7dde187d8797adb6d57a2ee99c245346c3cdfe AS weaver
+FROM otel/weaver:v0.19.0@sha256:3d20814cef548f1d31f27f054fb4cd6a05125641a9f7cc29fc7eb234e8052cd9 AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
index adb37b5b0..6db969f73 100644
--- a/vendor/go.opentelemetry.io/otel/internal/global/meter.go
+++ b/vendor/go.opentelemetry.io/otel/internal/global/meter.go
@@ -105,7 +105,7 @@ type delegatedInstrument interface {
setDelegate(metric.Meter)
}
-// instID are the identifying properties of a instrument.
+// instID are the identifying properties of an instrument.
type instID struct {
// name is the name of the stream.
name string
diff --git a/vendor/go.opentelemetry.io/otel/metric.go b/vendor/go.opentelemetry.io/otel/metric.go
index 1e6473b32..527d9aec8 100644
--- a/vendor/go.opentelemetry.io/otel/metric.go
+++ b/vendor/go.opentelemetry.io/otel/metric.go
@@ -11,7 +11,7 @@ import (
// Meter returns a Meter from the global MeterProvider. The name must be the
// name of the library providing instrumentation. This name may be the same as
// the instrumented code only if that code provides built-in instrumentation.
-// If the name is empty, then a implementation defined default name will be
+// If the name is empty, then an implementation defined default name will be
// used instead.
//
// If this is called before a global MeterProvider is registered the returned
diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go
index d9e3b13e4..e42dd6e70 100644
--- a/vendor/go.opentelemetry.io/otel/metric/config.go
+++ b/vendor/go.opentelemetry.io/otel/metric/config.go
@@ -3,7 +3,11 @@
package metric // import "go.opentelemetry.io/otel/metric"
-import "go.opentelemetry.io/otel/attribute"
+import (
+ "slices"
+
+ "go.opentelemetry.io/otel/attribute"
+)
// MeterConfig contains options for Meters.
type MeterConfig struct {
@@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption {
})
}
-// WithInstrumentationAttributes sets the instrumentation attributes.
+// WithInstrumentationAttributes adds the instrumentation attributes.
+//
+// This is equivalent to calling [WithInstrumentationAttributeSet] with an
+// [attribute.Set] created from a clone of the passed attributes.
+// [WithInstrumentationAttributeSet] is recommended for more control.
//
-// The passed attributes will be de-duplicated.
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
+ set := attribute.NewSet(slices.Clone(attr)...)
+ return WithInstrumentationAttributeSet(set)
+}
+
+// WithInstrumentationAttributeSet adds the instrumentation attributes.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
+func WithInstrumentationAttributeSet(set attribute.Set) MeterOption {
+ if set.Len() == 0 {
+ return meterOptionFunc(func(config MeterConfig) MeterConfig {
+ return config
+ })
+ }
+
return meterOptionFunc(func(config MeterConfig) MeterConfig {
- config.attrs = attribute.NewSet(attr...)
+ if config.attrs.Len() == 0 {
+ config.attrs = set
+ } else {
+ config.attrs = mergeSets(config.attrs, set)
+ }
return config
})
}
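With the options above, instrumentation attributes passed to a meter are now merged in the order the options appear, and the last value wins for duplicate keys. A hedged usage sketch against the new API; the meter name and attribute keys are illustrative:

package main

import (
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

func main() {
	// The later option wins on the duplicate key, so the instrumentation
	// scope ends up with service.tier="gold" and region="eu-west-1".
	meter := otel.Meter("example.com/instrumentation",
		metric.WithInstrumentationAttributes(
			attribute.String("service.tier", "bronze"),
			attribute.String("region", "eu-west-1"),
		),
		metric.WithInstrumentationAttributeSet(
			attribute.NewSet(attribute.String("service.tier", "gold")),
		),
	)
	_ = meter
}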
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
index 6692d2665..271ab71f1 100644
--- a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -111,7 +111,7 @@ func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
}
// Clear all flags other than the trace-context supported sampling bit.
- scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+ scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked.
// Ignore the error returned here. Failure to parse tracestate MUST NOT
// affect the parsing of traceparent according to the W3C tracecontext
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
index 666bded4b..267979c05 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/error_type.go
@@ -4,28 +4,53 @@
package semconv // import "go.opentelemetry.io/otel/semconv/v1.37.0"
import (
- "fmt"
"reflect"
"go.opentelemetry.io/otel/attribute"
)
// ErrorType returns an [attribute.KeyValue] identifying the error type of err.
+//
+// If err is nil, the returned attribute has the default value
+// [ErrorTypeOther].
+//
+// If err's type has the method
+//
+// ErrorType() string
+//
+// then the returned attribute has the value of err.ErrorType(). Otherwise, the
+// returned attribute has a value derived from the concrete type of err.
+//
+// The key of the returned attribute is [ErrorTypeKey].
func ErrorType(err error) attribute.KeyValue {
if err == nil {
return ErrorTypeOther
}
- t := reflect.TypeOf(err)
- var value string
- if t.PkgPath() == "" && t.Name() == "" {
- // Likely a builtin type.
- value = t.String()
- } else {
- value = fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+
+ return ErrorTypeKey.String(errorType(err))
+}
+
+func errorType(err error) string {
+ var s string
+ if et, ok := err.(interface{ ErrorType() string }); ok {
+ // Prioritize the ErrorType method if available.
+ s = et.ErrorType()
}
- if value == "" {
- return ErrorTypeOther
+ if s == "" {
+ // Fallback to reflection if the ErrorType method is not supported or
+ // returns an empty value.
+ t := reflect.TypeOf(err)
+ pkg, name := t.PkgPath(), t.Name()
+ if pkg != "" && name != "" {
+ s = pkg + "." + name
+ } else {
+ // The type has no package path or name (predeclared, not-defined,
+ // or alias for a not-defined type).
+ //
+ // This is not guaranteed to be unique, but is a best effort.
+ s = t.String()
+ }
}
- return ErrorTypeKey.String(value)
+ return s
}
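The reworked ErrorType above first looks for an `ErrorType() string` method on the error and only falls back to reflection over the concrete type. A short sketch of what that allows for a hypothetical custom error:

package main

import (
	"errors"
	"fmt"

	semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
)

type quotaError struct{ msg string }

func (e *quotaError) Error() string     { return e.msg }
func (e *quotaError) ErrorType() string { return "quota_exceeded" }

func main() {
	// The ErrorType method is preferred: error.type = quota_exceeded.
	kv := semconv.ErrorType(&quotaError{msg: "limit reached"})
	fmt.Println(kv.Key, "=", kv.Value.AsString())

	// No ErrorType method, so reflection kicks in and yields the concrete
	// type, something like "*errors.errorString".
	kv = semconv.ErrorType(errors.New("boom"))
	fmt.Println(kv.Key, "=", kv.Value.AsString())
}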
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
index 55bde895d..a0ddf652d 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/httpconv/metric.go
@@ -91,6 +91,11 @@ type ClientActiveRequests struct {
metric.Int64UpDownCounter
}
+var newClientActiveRequestsOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP requests."),
+ metric.WithUnit("{request}"),
+}
+
// NewClientActiveRequests returns a new ClientActiveRequests instrument.
func NewClientActiveRequests(
m metric.Meter,
@@ -101,15 +106,18 @@ func NewClientActiveRequests(
return ClientActiveRequests{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientActiveRequestsOpts
+ } else {
+ opt = append(opt, newClientActiveRequestsOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"http.client.active_requests",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("Number of active HTTP requests."),
- metric.WithUnit("{request}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
+ return ClientActiveRequests{noop.Int64UpDownCounter{}}, err
}
return ClientActiveRequests{i}, nil
}
@@ -223,6 +231,11 @@ type ClientConnectionDuration struct {
metric.Float64Histogram
}
+var newClientConnectionDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
+ metric.WithUnit("s"),
+}
+
// NewClientConnectionDuration returns a new ClientConnectionDuration instrument.
func NewClientConnectionDuration(
m metric.Meter,
@@ -233,15 +246,18 @@ func NewClientConnectionDuration(
return ClientConnectionDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientConnectionDurationOpts
+ } else {
+ opt = append(opt, newClientConnectionDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"http.client.connection.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("The duration of the successfully established outbound HTTP connections."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientConnectionDuration{noop.Float64Histogram{}}, err
+ return ClientConnectionDuration{noop.Float64Histogram{}}, err
}
return ClientConnectionDuration{i}, nil
}
@@ -310,6 +326,7 @@ func (m ClientConnectionDuration) Record(
func (m ClientConnectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -353,6 +370,11 @@ type ClientOpenConnections struct {
metric.Int64UpDownCounter
}
+var newClientOpenConnectionsOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
+ metric.WithUnit("{connection}"),
+}
+
// NewClientOpenConnections returns a new ClientOpenConnections instrument.
func NewClientOpenConnections(
m metric.Meter,
@@ -363,15 +385,18 @@ func NewClientOpenConnections(
return ClientOpenConnections{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientOpenConnectionsOpts
+ } else {
+ opt = append(opt, newClientOpenConnectionsOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"http.client.open_connections",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("Number of outbound HTTP connections that are currently active or idle on the client."),
- metric.WithUnit("{connection}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
+ return ClientOpenConnections{noop.Int64UpDownCounter{}}, err
}
return ClientOpenConnections{i}, nil
}
@@ -488,6 +513,11 @@ type ClientRequestBodySize struct {
metric.Int64Histogram
}
+var newClientRequestBodySizeOpts = []metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client request bodies."),
+ metric.WithUnit("By"),
+}
+
// NewClientRequestBodySize returns a new ClientRequestBodySize instrument.
func NewClientRequestBodySize(
m metric.Meter,
@@ -498,15 +528,18 @@ func NewClientRequestBodySize(
return ClientRequestBodySize{noop.Int64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientRequestBodySizeOpts
+ } else {
+ opt = append(opt, newClientRequestBodySizeOpts...)
+ }
+
i, err := m.Int64Histogram(
"http.client.request.body.size",
- append([]metric.Int64HistogramOption{
- metric.WithDescription("Size of HTTP client request bodies."),
- metric.WithUnit("By"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientRequestBodySize{noop.Int64Histogram{}}, err
+ return ClientRequestBodySize{noop.Int64Histogram{}}, err
}
return ClientRequestBodySize{i}, nil
}
@@ -593,6 +626,7 @@ func (m ClientRequestBodySize) Record(
func (m ClientRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -662,6 +696,11 @@ type ClientRequestDuration struct {
metric.Float64Histogram
}
+var newClientRequestDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP client requests."),
+ metric.WithUnit("s"),
+}
+
// NewClientRequestDuration returns a new ClientRequestDuration instrument.
func NewClientRequestDuration(
m metric.Meter,
@@ -672,15 +711,18 @@ func NewClientRequestDuration(
return ClientRequestDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientRequestDurationOpts
+ } else {
+ opt = append(opt, newClientRequestDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"http.client.request.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("Duration of HTTP client requests."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientRequestDuration{noop.Float64Histogram{}}, err
+ return ClientRequestDuration{noop.Float64Histogram{}}, err
}
return ClientRequestDuration{i}, nil
}
@@ -753,6 +795,7 @@ func (m ClientRequestDuration) Record(
func (m ClientRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -822,6 +865,11 @@ type ClientResponseBodySize struct {
metric.Int64Histogram
}
+var newClientResponseBodySizeOpts = []metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP client response bodies."),
+ metric.WithUnit("By"),
+}
+
// NewClientResponseBodySize returns a new ClientResponseBodySize instrument.
func NewClientResponseBodySize(
m metric.Meter,
@@ -832,15 +880,18 @@ func NewClientResponseBodySize(
return ClientResponseBodySize{noop.Int64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newClientResponseBodySizeOpts
+ } else {
+ opt = append(opt, newClientResponseBodySizeOpts...)
+ }
+
i, err := m.Int64Histogram(
"http.client.response.body.size",
- append([]metric.Int64HistogramOption{
- metric.WithDescription("Size of HTTP client response bodies."),
- metric.WithUnit("By"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ClientResponseBodySize{noop.Int64Histogram{}}, err
+ return ClientResponseBodySize{noop.Int64Histogram{}}, err
}
return ClientResponseBodySize{i}, nil
}
@@ -927,6 +978,7 @@ func (m ClientResponseBodySize) Record(
func (m ClientResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -996,6 +1048,11 @@ type ServerActiveRequests struct {
metric.Int64UpDownCounter
}
+var newServerActiveRequestsOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("Number of active HTTP server requests."),
+ metric.WithUnit("{request}"),
+}
+
// NewServerActiveRequests returns a new ServerActiveRequests instrument.
func NewServerActiveRequests(
m metric.Meter,
@@ -1006,15 +1063,18 @@ func NewServerActiveRequests(
return ServerActiveRequests{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newServerActiveRequestsOpts
+ } else {
+ opt = append(opt, newServerActiveRequestsOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"http.server.active_requests",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("Number of active HTTP server requests."),
- metric.WithUnit("{request}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
+ return ServerActiveRequests{noop.Int64UpDownCounter{}}, err
}
return ServerActiveRequests{i}, nil
}
@@ -1118,6 +1178,11 @@ type ServerRequestBodySize struct {
metric.Int64Histogram
}
+var newServerRequestBodySizeOpts = []metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server request bodies."),
+ metric.WithUnit("By"),
+}
+
// NewServerRequestBodySize returns a new ServerRequestBodySize instrument.
func NewServerRequestBodySize(
m metric.Meter,
@@ -1128,15 +1193,18 @@ func NewServerRequestBodySize(
return ServerRequestBodySize{noop.Int64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newServerRequestBodySizeOpts
+ } else {
+ opt = append(opt, newServerRequestBodySizeOpts...)
+ }
+
i, err := m.Int64Histogram(
"http.server.request.body.size",
- append([]metric.Int64HistogramOption{
- metric.WithDescription("Size of HTTP server request bodies."),
- metric.WithUnit("By"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ServerRequestBodySize{noop.Int64Histogram{}}, err
+ return ServerRequestBodySize{noop.Int64Histogram{}}, err
}
return ServerRequestBodySize{i}, nil
}
@@ -1220,6 +1288,7 @@ func (m ServerRequestBodySize) Record(
func (m ServerRequestBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1299,6 +1368,11 @@ type ServerRequestDuration struct {
metric.Float64Histogram
}
+var newServerRequestDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("Duration of HTTP server requests."),
+ metric.WithUnit("s"),
+}
+
// NewServerRequestDuration returns a new ServerRequestDuration instrument.
func NewServerRequestDuration(
m metric.Meter,
@@ -1309,15 +1383,18 @@ func NewServerRequestDuration(
return ServerRequestDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newServerRequestDurationOpts
+ } else {
+ opt = append(opt, newServerRequestDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"http.server.request.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("Duration of HTTP server requests."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ServerRequestDuration{noop.Float64Histogram{}}, err
+ return ServerRequestDuration{noop.Float64Histogram{}}, err
}
return ServerRequestDuration{i}, nil
}
@@ -1387,6 +1464,7 @@ func (m ServerRequestDuration) Record(
func (m ServerRequestDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1466,6 +1544,11 @@ type ServerResponseBodySize struct {
metric.Int64Histogram
}
+var newServerResponseBodySizeOpts = []metric.Int64HistogramOption{
+ metric.WithDescription("Size of HTTP server response bodies."),
+ metric.WithUnit("By"),
+}
+
// NewServerResponseBodySize returns a new ServerResponseBodySize instrument.
func NewServerResponseBodySize(
m metric.Meter,
@@ -1476,15 +1559,18 @@ func NewServerResponseBodySize(
return ServerResponseBodySize{noop.Int64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newServerResponseBodySizeOpts
+ } else {
+ opt = append(opt, newServerResponseBodySizeOpts...)
+ }
+
i, err := m.Int64Histogram(
"http.server.response.body.size",
- append([]metric.Int64HistogramOption{
- metric.WithDescription("Size of HTTP server response bodies."),
- metric.WithUnit("By"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return ServerResponseBodySize{noop.Int64Histogram{}}, err
+ return ServerResponseBodySize{noop.Int64Histogram{}}, err
}
return ServerResponseBodySize{i}, nil
}
@@ -1568,6 +1654,7 @@ func (m ServerResponseBodySize) Record(
func (m ServerResponseBodySize) RecordSet(ctx context.Context, val int64, set attribute.Set) {
if set.Len() == 0 {
m.Int64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1638,4 +1725,4 @@ func (ServerResponseBodySize) AttrServerPort(val int) attribute.KeyValue {
// the category of synthetic traffic, such as tests or bots.
func (ServerResponseBodySize) AttrUserAgentSyntheticType(val UserAgentSyntheticTypeAttr) attribute.KeyValue {
return attribute.String("user_agent.synthetic.type", string(val))
-}
\ No newline at end of file
+}
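The httpconv changes above (and the matching otelconv changes below) hoist each instrument's default description and unit into a package-level option slice, reusing it directly when the caller supplies no options and appending it otherwise; the added `return` statements in the RecordSet helpers keep the value from being recorded a second time when the attribute set is empty. A generic sketch of the default-options pattern, with an illustrative instrument name rather than one of the semconv instruments:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/metric"
)

// Package-level defaults, built once instead of on every constructor call.
var defaultDurationOpts = []metric.Float64HistogramOption{
	metric.WithDescription("Duration of example operations."),
	metric.WithUnit("s"),
}

func newDurationHistogram(m metric.Meter, opt ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
	if len(opt) == 0 {
		opt = defaultDurationOpts // no caller options: reuse the shared slice
	} else {
		opt = append(opt, defaultDurationOpts...)
	}
	return m.Float64Histogram("example.operation.duration", opt...)
}

func main() {
	h, err := newDurationHistogram(otel.Meter("example.com/instrumentation"))
	fmt.Println(h != nil, err)
}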
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
index a78eafd1f..fd064530c 100644
--- a/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.37.0/otelconv/metric.go
@@ -3,7 +3,7 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
-// Package httpconv provides types and functionality for OpenTelemetry semantic
+// Package otelconv provides types and functionality for OpenTelemetry semantic
// conventions in the "otel" namespace.
package otelconv
@@ -172,6 +172,11 @@ type SDKExporterLogExported struct {
metric.Int64Counter
}
+var newSDKExporterLogExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKExporterLogExported returns a new SDKExporterLogExported instrument.
func NewSDKExporterLogExported(
m metric.Meter,
@@ -182,15 +187,18 @@ func NewSDKExporterLogExported(
return SDKExporterLogExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterLogExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterLogExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.log.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of log records for which the export has finished, either successful or failed."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterLogExported{noop.Int64Counter{}}, err
+ return SDKExporterLogExported{noop.Int64Counter{}}, err
}
return SDKExporterLogExported{i}, nil
}
@@ -319,6 +327,11 @@ type SDKExporterLogInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterLogInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKExporterLogInflight returns a new SDKExporterLogInflight instrument.
func NewSDKExporterLogInflight(
m metric.Meter,
@@ -329,15 +342,18 @@ func NewSDKExporterLogInflight(
return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterLogInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterLogInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.log.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of log records which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterLogInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterLogInflight{i}, nil
}
@@ -449,6 +465,11 @@ type SDKExporterMetricDataPointExported struct {
metric.Int64Counter
}
+var newSDKExporterMetricDataPointExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
+ metric.WithUnit("{data_point}"),
+}
+
// NewSDKExporterMetricDataPointExported returns a new
// SDKExporterMetricDataPointExported instrument.
func NewSDKExporterMetricDataPointExported(
@@ -460,15 +481,18 @@ func NewSDKExporterMetricDataPointExported(
return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterMetricDataPointExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterMetricDataPointExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.metric_data_point.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of metric data points for which the export has finished, either successful or failed."),
- metric.WithUnit("{data_point}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
+ return SDKExporterMetricDataPointExported{noop.Int64Counter{}}, err
}
return SDKExporterMetricDataPointExported{i}, nil
}
@@ -598,6 +622,11 @@ type SDKExporterMetricDataPointInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterMetricDataPointInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{data_point}"),
+}
+
// NewSDKExporterMetricDataPointInflight returns a new
// SDKExporterMetricDataPointInflight instrument.
func NewSDKExporterMetricDataPointInflight(
@@ -609,15 +638,18 @@ func NewSDKExporterMetricDataPointInflight(
return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterMetricDataPointInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterMetricDataPointInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.metric_data_point.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of metric data points which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{data_point}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterMetricDataPointInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterMetricDataPointInflight{i}, nil
}
@@ -728,6 +760,11 @@ type SDKExporterOperationDuration struct {
metric.Float64Histogram
}
+var newSDKExporterOperationDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("The duration of exporting a batch of telemetry records."),
+ metric.WithUnit("s"),
+}
+
// NewSDKExporterOperationDuration returns a new SDKExporterOperationDuration
// instrument.
func NewSDKExporterOperationDuration(
@@ -739,15 +776,18 @@ func NewSDKExporterOperationDuration(
return SDKExporterOperationDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterOperationDurationOpts
+ } else {
+ opt = append(opt, newSDKExporterOperationDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"otel.sdk.exporter.operation.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("The duration of exporting a batch of telemetry records."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
+ return SDKExporterOperationDuration{noop.Float64Histogram{}}, err
}
return SDKExporterOperationDuration{i}, nil
}
@@ -825,6 +865,7 @@ func (m SDKExporterOperationDuration) Record(
func (m SDKExporterOperationDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -893,6 +934,11 @@ type SDKExporterSpanExported struct {
metric.Int64Counter
}
+var newSDKExporterSpanExportedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKExporterSpanExported returns a new SDKExporterSpanExported instrument.
func NewSDKExporterSpanExported(
m metric.Meter,
@@ -903,15 +949,18 @@ func NewSDKExporterSpanExported(
return SDKExporterSpanExported{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterSpanExportedOpts
+ } else {
+ opt = append(opt, newSDKExporterSpanExportedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.exporter.span.exported",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of spans for which the export has finished, either successful or failed."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterSpanExported{noop.Int64Counter{}}, err
+ return SDKExporterSpanExported{noop.Int64Counter{}}, err
}
return SDKExporterSpanExported{i}, nil
}
@@ -1040,6 +1089,11 @@ type SDKExporterSpanInflight struct {
metric.Int64UpDownCounter
}
+var newSDKExporterSpanInflightOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKExporterSpanInflight returns a new SDKExporterSpanInflight instrument.
func NewSDKExporterSpanInflight(
m metric.Meter,
@@ -1050,15 +1104,18 @@ func NewSDKExporterSpanInflight(
return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKExporterSpanInflightOpts
+ } else {
+ opt = append(opt, newSDKExporterSpanInflightOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.exporter.span.inflight",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of spans which were passed to the exporter, but that have not been exported yet (neither successful, nor failed)."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
+ return SDKExporterSpanInflight{noop.Int64UpDownCounter{}}, err
}
return SDKExporterSpanInflight{i}, nil
}
@@ -1169,6 +1226,11 @@ type SDKLogCreated struct {
metric.Int64Counter
}
+var newSDKLogCreatedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKLogCreated returns a new SDKLogCreated instrument.
func NewSDKLogCreated(
m metric.Meter,
@@ -1179,15 +1241,18 @@ func NewSDKLogCreated(
return SDKLogCreated{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKLogCreatedOpts
+ } else {
+ opt = append(opt, newSDKLogCreatedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.log.created",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of logs submitted to enabled SDK Loggers."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKLogCreated{noop.Int64Counter{}}, err
+ return SDKLogCreated{noop.Int64Counter{}}, err
}
return SDKLogCreated{i}, nil
}
@@ -1254,6 +1319,11 @@ type SDKMetricReaderCollectionDuration struct {
metric.Float64Histogram
}
+var newSDKMetricReaderCollectionDurationOpts = []metric.Float64HistogramOption{
+ metric.WithDescription("The duration of the collect operation of the metric reader."),
+ metric.WithUnit("s"),
+}
+
// NewSDKMetricReaderCollectionDuration returns a new
// SDKMetricReaderCollectionDuration instrument.
func NewSDKMetricReaderCollectionDuration(
@@ -1265,15 +1335,18 @@ func NewSDKMetricReaderCollectionDuration(
return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKMetricReaderCollectionDurationOpts
+ } else {
+ opt = append(opt, newSDKMetricReaderCollectionDurationOpts...)
+ }
+
i, err := m.Float64Histogram(
"otel.sdk.metric_reader.collection.duration",
- append([]metric.Float64HistogramOption{
- metric.WithDescription("The duration of the collect operation of the metric reader."),
- metric.WithUnit("s"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
+ return SDKMetricReaderCollectionDuration{noop.Float64Histogram{}}, err
}
return SDKMetricReaderCollectionDuration{i}, nil
}
@@ -1343,6 +1416,7 @@ func (m SDKMetricReaderCollectionDuration) Record(
func (m SDKMetricReaderCollectionDuration) RecordSet(ctx context.Context, val float64, set attribute.Set) {
if set.Len() == 0 {
m.Float64Histogram.Record(ctx, val)
+ return
}
o := recOptPool.Get().(*[]metric.RecordOption)
@@ -1384,6 +1458,11 @@ type SDKProcessorLogProcessed struct {
metric.Int64Counter
}
+var newSDKProcessorLogProcessedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogProcessed returns a new SDKProcessorLogProcessed instrument.
func NewSDKProcessorLogProcessed(
m metric.Meter,
@@ -1394,15 +1473,18 @@ func NewSDKProcessorLogProcessed(
return SDKProcessorLogProcessed{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogProcessedOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogProcessedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.processor.log.processed",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of log records for which the processing has finished, either successful or failed."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
+ return SDKProcessorLogProcessed{noop.Int64Counter{}}, err
}
return SDKProcessorLogProcessed{i}, nil
}
@@ -1515,6 +1597,11 @@ type SDKProcessorLogQueueCapacity struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorLogQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogQueueCapacity returns a new SDKProcessorLogQueueCapacity
// instrument.
func NewSDKProcessorLogQueueCapacity(
@@ -1526,15 +1613,18 @@ func NewSDKProcessorLogQueueCapacity(
return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogQueueCapacityOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogQueueCapacityOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.log.queue.capacity",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The maximum number of log records the queue of a given instance of an SDK Log Record processor can hold."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorLogQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorLogQueueCapacity{i}, nil
}
@@ -1581,6 +1671,11 @@ type SDKProcessorLogQueueSize struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorLogQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
+ metric.WithUnit("{log_record}"),
+}
+
// NewSDKProcessorLogQueueSize returns a new SDKProcessorLogQueueSize instrument.
func NewSDKProcessorLogQueueSize(
m metric.Meter,
@@ -1591,15 +1686,18 @@ func NewSDKProcessorLogQueueSize(
return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorLogQueueSizeOpts
+ } else {
+ opt = append(opt, newSDKProcessorLogQueueSizeOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.log.queue.size",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The number of log records in the queue of a given instance of an SDK log processor."),
- metric.WithUnit("{log_record}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorLogQueueSize{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorLogQueueSize{i}, nil
}
@@ -1646,6 +1744,11 @@ type SDKProcessorSpanProcessed struct {
metric.Int64Counter
}
+var newSDKProcessorSpanProcessedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanProcessed returns a new SDKProcessorSpanProcessed
// instrument.
func NewSDKProcessorSpanProcessed(
@@ -1657,15 +1760,18 @@ func NewSDKProcessorSpanProcessed(
return SDKProcessorSpanProcessed{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanProcessedOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanProcessedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.processor.span.processed",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of spans for which the processing has finished, either successful or failed."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
+ return SDKProcessorSpanProcessed{noop.Int64Counter{}}, err
}
return SDKProcessorSpanProcessed{i}, nil
}
@@ -1778,6 +1884,11 @@ type SDKProcessorSpanQueueCapacity struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorSpanQueueCapacityOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanQueueCapacity returns a new SDKProcessorSpanQueueCapacity
// instrument.
func NewSDKProcessorSpanQueueCapacity(
@@ -1789,15 +1900,18 @@ func NewSDKProcessorSpanQueueCapacity(
return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanQueueCapacityOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanQueueCapacityOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.span.queue.capacity",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The maximum number of spans the queue of a given instance of an SDK span processor can hold."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorSpanQueueCapacity{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorSpanQueueCapacity{i}, nil
}
@@ -1844,6 +1958,11 @@ type SDKProcessorSpanQueueSize struct {
metric.Int64ObservableUpDownCounter
}
+var newSDKProcessorSpanQueueSizeOpts = []metric.Int64ObservableUpDownCounterOption{
+ metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKProcessorSpanQueueSize returns a new SDKProcessorSpanQueueSize
// instrument.
func NewSDKProcessorSpanQueueSize(
@@ -1855,15 +1974,18 @@ func NewSDKProcessorSpanQueueSize(
return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKProcessorSpanQueueSizeOpts
+ } else {
+ opt = append(opt, newSDKProcessorSpanQueueSizeOpts...)
+ }
+
i, err := m.Int64ObservableUpDownCounter(
"otel.sdk.processor.span.queue.size",
- append([]metric.Int64ObservableUpDownCounterOption{
- metric.WithDescription("The number of spans in the queue of a given instance of an SDK span processor."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
+ return SDKProcessorSpanQueueSize{noop.Int64ObservableUpDownCounter{}}, err
}
return SDKProcessorSpanQueueSize{i}, nil
}
@@ -1910,6 +2032,11 @@ type SDKSpanLive struct {
metric.Int64UpDownCounter
}
+var newSDKSpanLiveOpts = []metric.Int64UpDownCounterOption{
+ metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKSpanLive returns a new SDKSpanLive instrument.
func NewSDKSpanLive(
m metric.Meter,
@@ -1920,15 +2047,18 @@ func NewSDKSpanLive(
return SDKSpanLive{noop.Int64UpDownCounter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKSpanLiveOpts
+ } else {
+ opt = append(opt, newSDKSpanLiveOpts...)
+ }
+
i, err := m.Int64UpDownCounter(
"otel.sdk.span.live",
- append([]metric.Int64UpDownCounterOption{
- metric.WithDescription("The number of created spans with `recording=true` for which the end operation has not been called yet."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKSpanLive{noop.Int64UpDownCounter{}}, err
+ return SDKSpanLive{noop.Int64UpDownCounter{}}, err
}
return SDKSpanLive{i}, nil
}
@@ -2013,6 +2143,11 @@ type SDKSpanStarted struct {
metric.Int64Counter
}
+var newSDKSpanStartedOpts = []metric.Int64CounterOption{
+ metric.WithDescription("The number of created spans."),
+ metric.WithUnit("{span}"),
+}
+
// NewSDKSpanStarted returns a new SDKSpanStarted instrument.
func NewSDKSpanStarted(
m metric.Meter,
@@ -2023,15 +2158,18 @@ func NewSDKSpanStarted(
return SDKSpanStarted{noop.Int64Counter{}}, nil
}
+ if len(opt) == 0 {
+ opt = newSDKSpanStartedOpts
+ } else {
+ opt = append(opt, newSDKSpanStartedOpts...)
+ }
+
i, err := m.Int64Counter(
"otel.sdk.span.started",
- append([]metric.Int64CounterOption{
- metric.WithDescription("The number of created spans."),
- metric.WithUnit("{span}"),
- }, opt...)...,
+ opt...,
)
if err != nil {
- return SDKSpanStarted{noop.Int64Counter{}}, err
+ return SDKSpanStarted{noop.Int64Counter{}}, err
}
return SDKSpanStarted{i}, nil
}
@@ -2123,4 +2261,4 @@ func (SDKSpanStarted) AttrSpanParentOrigin(val SpanParentOriginAttr) attribute.K
// value of the sampler for this span.
func (SDKSpanStarted) AttrSpanSamplingResult(val SpanSamplingResultAttr) attribute.KeyValue {
return attribute.String("otel.span.sampling_result", string(val))
-}
\ No newline at end of file
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go
index aea11a2b5..d9ecef1ca 100644
--- a/vendor/go.opentelemetry.io/otel/trace/config.go
+++ b/vendor/go.opentelemetry.io/otel/trace/config.go
@@ -4,6 +4,7 @@
package trace // import "go.opentelemetry.io/otel/trace"
import (
+ "slices"
"time"
"go.opentelemetry.io/otel/attribute"
@@ -304,12 +305,50 @@ func WithInstrumentationVersion(version string) TracerOption {
})
}
-// WithInstrumentationAttributes sets the instrumentation attributes.
+// mergeSets returns the union of keys between a and b. Any duplicate keys will
+// use the value associated with b.
+func mergeSets(a, b attribute.Set) attribute.Set {
+ // NewMergeIterator uses the first value for any duplicates.
+ iter := attribute.NewMergeIterator(&b, &a)
+ merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
+ for iter.Next() {
+ merged = append(merged, iter.Attribute())
+ }
+ return attribute.NewSet(merged...)
+}
+
+// WithInstrumentationAttributes adds the instrumentation attributes.
//
-// The passed attributes will be de-duplicated.
+// This is equivalent to calling [WithInstrumentationAttributeSet] with an
+// [attribute.Set] created from a clone of the passed attributes.
+// [WithInstrumentationAttributeSet] is recommended for more control.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) TracerOption {
+ set := attribute.NewSet(slices.Clone(attr)...)
+ return WithInstrumentationAttributeSet(set)
+}
+
+// WithInstrumentationAttributeSet adds the instrumentation attributes.
+//
+// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
+// options are passed, the attributes will be merged together in the order
+// they are passed. Attributes with duplicate keys will use the last value passed.
+func WithInstrumentationAttributeSet(set attribute.Set) TracerOption {
+ if set.Len() == 0 {
+ return tracerOptionFunc(func(config TracerConfig) TracerConfig {
+ return config
+ })
+ }
+
return tracerOptionFunc(func(config TracerConfig) TracerConfig {
- config.attrs = attribute.NewSet(attr...)
+ if config.attrs.Len() == 0 {
+ config.attrs = set
+ } else {
+ config.attrs = mergeSets(config.attrs, set)
+ }
return config
})
}
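mergeSets above relies on attribute.NewMergeIterator preferring the first set it receives, so passing (&b, &a) makes b's values win on duplicate keys. A small sketch of that behavior; the keys are illustrative:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

// merge mirrors the mergeSets helper: duplicate keys take b's value.
func merge(a, b attribute.Set) attribute.Set {
	iter := attribute.NewMergeIterator(&b, &a)
	merged := make([]attribute.KeyValue, 0, a.Len()+b.Len())
	for iter.Next() {
		merged = append(merged, iter.Attribute())
	}
	return attribute.NewSet(merged...)
}

func main() {
	a := attribute.NewSet(attribute.String("env", "dev"), attribute.String("team", "core"))
	b := attribute.NewSet(attribute.String("env", "prod"))
	m := merge(a, b)
	v, _ := m.Value("env")
	fmt.Println(v.AsString(), m.Len()) // prod 2
}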
diff --git a/vendor/go.opentelemetry.io/otel/trace/span.go b/vendor/go.opentelemetry.io/otel/trace/span.go
index d3aa476ee..d01e79366 100644
--- a/vendor/go.opentelemetry.io/otel/trace/span.go
+++ b/vendor/go.opentelemetry.io/otel/trace/span.go
@@ -66,6 +66,10 @@ type Span interface {
// SetAttributes sets kv as attributes of the Span. If a key from kv
// already exists for an attribute of the Span it will be overwritten with
// the value contained in kv.
+ //
+ // Note that adding attributes at span creation using [WithAttributes] is preferred
+ // to calling SetAttribute later, as samplers can only consider information
+ // already present during span creation.
SetAttributes(kv ...attribute.KeyValue)
// TracerProvider returns a TracerProvider that can be used to generate
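The added Span doc note above recommends attaching attributes at span creation so samplers can take them into account. A hedged example of that guidance; the tracer name and keys are illustrative:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func handle(ctx context.Context, tenant string) {
	tracer := otel.Tracer("example.com/instrumentation")

	// Known up front: pass it at start time so a sampler can see it.
	ctx, span := tracer.Start(ctx, "handle",
		trace.WithAttributes(attribute.String("tenant.id", tenant)))
	defer span.End()

	// Only known after the work is done, so it has to be set late.
	span.SetAttributes(attribute.Int("items.processed", 42))
	_ = ctx
}

func main() {
	handle(context.Background(), "acme")
}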
diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go
index bcaa5aa53..0d5b02918 100644
--- a/vendor/go.opentelemetry.io/otel/version.go
+++ b/vendor/go.opentelemetry.io/otel/version.go
@@ -5,5 +5,5 @@ package otel // import "go.opentelemetry.io/otel"
// Version is the current release version of OpenTelemetry in use.
func Version() string {
- return "1.38.0"
+ return "1.39.0"
}
diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml
index 07145e254..f4a3893eb 100644
--- a/vendor/go.opentelemetry.io/otel/versions.yaml
+++ b/vendor/go.opentelemetry.io/otel/versions.yaml
@@ -3,7 +3,7 @@
module-sets:
stable-v1:
- version: v1.38.0
+ version: v1.39.0
modules:
- go.opentelemetry.io/otel
- go.opentelemetry.io/otel/bridge/opencensus
@@ -22,11 +22,11 @@ module-sets:
- go.opentelemetry.io/otel/sdk/metric
- go.opentelemetry.io/otel/trace
experimental-metrics:
- version: v0.60.0
+ version: v0.61.0
modules:
- go.opentelemetry.io/otel/exporters/prometheus
experimental-logs:
- version: v0.14.0
+ version: v0.15.0
modules:
- go.opentelemetry.io/otel/log
- go.opentelemetry.io/otel/log/logtest
@@ -36,9 +36,28 @@ module-sets:
- go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- go.opentelemetry.io/otel/exporters/stdout/stdoutlog
experimental-schema:
- version: v0.0.13
+ version: v0.0.14
modules:
- go.opentelemetry.io/otel/schema
excluded-modules:
- go.opentelemetry.io/otel/internal/tools
- go.opentelemetry.io/otel/trace/internal/telemetry/test
+modules:
+ go.opentelemetry.io/otel/exporters/stdout/stdouttrace:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/prometheus:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp:
+ version-refs:
+ - ./internal/version.go
+ go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp:
+ version-refs:
+ - ./internal/version.go
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 633ed2839..c2caa1d22 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -89,7 +89,7 @@ github.com/blang/semver/v4
# github.com/cenkalti/backoff/v5 v5.0.3
## explicit; go 1.23
github.com/cenkalti/backoff/v5
-# github.com/cert-manager/cert-manager v1.19.2
+# github.com/cert-manager/cert-manager v1.19.3
## explicit; go 1.25.0
github.com/cert-manager/cert-manager/pkg/apis/acme
github.com/cert-manager/cert-manager/pkg/apis/acme/v1
@@ -317,10 +317,10 @@ github.com/go-logr/logr/funcr
# github.com/go-logr/stdr v1.2.2
## explicit; go 1.16
github.com/go-logr/stdr
-# github.com/go-openapi/jsonpointer v0.22.3
+# github.com/go-openapi/jsonpointer v0.22.4
## explicit; go 1.24.0
github.com/go-openapi/jsonpointer
-# github.com/go-openapi/jsonreference v0.21.3
+# github.com/go-openapi/jsonreference v0.21.4
## explicit; go 1.24.0
github.com/go-openapi/jsonreference
github.com/go-openapi/jsonreference/internal
@@ -851,11 +851,12 @@ go.opentelemetry.io/auto/sdk/internal/telemetry
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/request
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/internal/semconv
-# go.opentelemetry.io/otel v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel v1.39.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel
go.opentelemetry.io/otel/attribute
go.opentelemetry.io/otel/attribute/internal
+go.opentelemetry.io/otel/attribute/internal/xxhash
go.opentelemetry.io/otel/baggage
go.opentelemetry.io/otel/codes
go.opentelemetry.io/otel/internal/baggage
@@ -877,8 +878,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/envconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/otlpconfig
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc/internal/retry
-# go.opentelemetry.io/otel/metric v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/metric v1.39.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/metric
go.opentelemetry.io/otel/metric/embedded
go.opentelemetry.io/otel/metric/noop
@@ -891,8 +892,8 @@ go.opentelemetry.io/otel/sdk/internal/x
go.opentelemetry.io/otel/sdk/resource
go.opentelemetry.io/otel/sdk/trace
go.opentelemetry.io/otel/sdk/trace/internal/x
-# go.opentelemetry.io/otel/trace v1.38.0
-## explicit; go 1.23.0
+# go.opentelemetry.io/otel/trace v1.39.0
+## explicit; go 1.24.0
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/internal/telemetry
@@ -1985,7 +1986,7 @@ oras.land/oras-go/v2/registry/remote/credentials/trace
oras.land/oras-go/v2/registry/remote/errcode
oras.land/oras-go/v2/registry/remote/internal/errutil
oras.land/oras-go/v2/registry/remote/retry
-# pkg.package-operator.run/boxcutter v0.8.0
+# pkg.package-operator.run/boxcutter v0.8.1
## explicit; go 1.24.6
pkg.package-operator.run/boxcutter
pkg.package-operator.run/boxcutter/machinery
diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/errors.go b/vendor/pkg.package-operator.run/boxcutter/machinery/errors.go
index 3a2665ca2..6040d7ea9 100644
--- a/vendor/pkg.package-operator.run/boxcutter/machinery/errors.go
+++ b/vendor/pkg.package-operator.run/boxcutter/machinery/errors.go
@@ -15,6 +15,19 @@ type CreateCollisionError struct {
msg string
}
+// NewCreateCollisionError creates a new CreateCollisionError.
+func NewCreateCollisionError(obj client.Object, msg string) *CreateCollisionError {
+ return &CreateCollisionError{
+ object: obj,
+ msg: msg,
+ }
+}
+
+// Object is the object reference that caused the error.
+func (e CreateCollisionError) Object() client.Object {
+ return e.object
+}
+
// Error implements golangs error interface.
func (e CreateCollisionError) Error() string {
return fmt.Sprintf("%s: %s", e.object, e.msg)
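The new constructor and Object accessor above let code outside the package both build collision errors and inspect which object collided. A sketch of how a caller might surface that information; the ConfigMap and message are illustrative:

package main

import (
	"errors"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"pkg.package-operator.run/boxcutter/machinery"
)

// describeCollision unwraps a CreateCollisionError and names the object.
func describeCollision(err error) string {
	var collision *machinery.CreateCollisionError
	if errors.As(err, &collision) {
		obj := collision.Object()
		return fmt.Sprintf("create collision on %s/%s", obj.GetNamespace(), obj.GetName())
	}
	return err.Error()
}

func main() {
	cm := &corev1.ConfigMap{}
	cm.SetNamespace("default")
	cm.SetName("demo")
	fmt.Println(describeCollision(machinery.NewCreateCollisionError(cm, "already exists")))
}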
diff --git a/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go b/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go
index 5da668ff1..d81e54e6c 100644
--- a/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go
+++ b/vendor/pkg.package-operator.run/boxcutter/machinery/objects.go
@@ -237,7 +237,7 @@ func (e *ObjectEngine) Reconcile(
if errors.IsAlreadyExists(err) {
// Might be a slow cache or an object created by a different actor
// but excluded by the cache selector.
- return nil, &CreateCollisionError{object: desiredObject, msg: err.Error()}
+ return nil, NewCreateCollisionError(desiredObject, err.Error())
}
if err != nil {