diff -pruN 2.31.2+ds1-1/CHANGELOG.md 2.33.5+ds1-2/CHANGELOG.md
--- 2.31.2+ds1-1/CHANGELOG.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/CHANGELOG.md	2022-03-08 16:34:32.000000000 +0000
@@ -1,3 +1,93 @@
+## 2.33.5 / 2022-03-08
+
+The binaries published with this release are built with Go1.17.8 to avoid [CVE-2022-24921](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-24921).
+
+* [BUGFIX] Remote-write: Fix deadlock between adding to queue and getting batch. #10395
+
+## 2.33.4 / 2022-02-22
+
+* [BUGFIX] TSDB: Fix panic when m-mapping head chunks onto the disk. #10316
+
+## 2.33.3 / 2022-02-11
+
+* [BUGFIX] Azure SD: Fix a regression when public IP Address isn't set. #10289
+
+## 2.33.2 / 2022-02-11
+
+* [BUGFIX] Azure SD: Fix panic when public IP Address isn't set. #10280
+* [BUGFIX] Remote-write: Fix deadlock when stopping a shard. #10279
+
+## 2.33.1 / 2022-02-02
+
+* [BUGFIX] SD: Fix _no such file or directory_ in K8s SD when not running inside K8s. #10235
+
+## 2.33.0 / 2022-01-29
+
+* [CHANGE] PromQL: Promote negative offset and `@` modifier to stable features. #10121
+* [CHANGE] Web: Promote remote-write-receiver to stable. #10119
+* [FEATURE] Config: Add `stripPort` template function. #10002
+* [FEATURE] Promtool: Add cardinality analysis to `check metrics`, enabled by flag `--extended`. #10045
+* [FEATURE] SD: Enable target discovery in own K8s namespace. #9881
+* [FEATURE] SD: Add provider ID label in K8s SD. #9603
+* [FEATURE] Web: Add limit field to the rules API. #10152
+* [ENHANCEMENT] Remote-write: Avoid allocations by buffering concrete structs instead of interfaces. #9934
+* [ENHANCEMENT] Remote-write: Log time series details for out-of-order samples in remote write receiver. #9894
+* [ENHANCEMENT] Remote-write: Shard up more when backlogged. #9274
+* [ENHANCEMENT] TSDB: Use simpler map key to improve exemplar ingest performance. #10111
+* [ENHANCEMENT] TSDB: Avoid allocations when popping from the intersected postings heap. #10092
+* [ENHANCEMENT] TSDB: Make chunk writing non-blocking, avoiding latency spikes in remote-write. #10051
+* [ENHANCEMENT] TSDB: Improve label matching performance. #9907
+* [ENHANCEMENT] UI: Optimize the service discovery page and add a search bar. #10131
+* [ENHANCEMENT] UI: Optimize the target page and add a search bar. #10103
+* [BUGFIX] Promtool: Make exit codes more consistent. #9861
+* [BUGFIX] Promtool: Fix flakiness of rule testing. #8818
+* [BUGFIX] Remote-write: Update `prometheus_remote_storage_queue_highest_sent_timestamp_seconds` metric when write irrecoverably fails. #10102
+* [BUGFIX] Storage: Avoid panic in `BufferedSeriesIterator`. #9945
+* [BUGFIX] TSDB: CompactBlockMetas should produce correct mint/maxt for overlapping blocks. #10108
+* [BUGFIX] TSDB: Fix logging of exemplar storage size. #9938
+* [BUGFIX] UI: Fix overlapping click targets for the alert state checkboxes. #10136
+* [BUGFIX] UI: Fix _Unhealthy_ filter on target page to actually display only _Unhealthy_ targets. #10103
+* [BUGFIX] UI: Fix autocompletion when expression is empty. #10053
+* [BUGFIX] TSDB: Fix deadlock from simultaneous GC and write. #10166
+
+## 2.32.1 / 2021-12-17
+
+* [BUGFIX] Scrape: Fix reporting metrics when sample limit is reached during the report. #9996
+* [BUGFIX] Scrape: Ensure that scrape interval and scrape timeout are always set. #10023
+* [BUGFIX] TSDB: Expose and fix bug in iterators' `Seek()` method. #10030
+
+## 2.32.0 / 2021-12-09
+
+This release introduces the Prometheus Agent, a new mode of operation for
+Prometheus optimized for remote-write only scenarios. In this mode, Prometheus
+does not generate blocks on the local filesystem and is not queryable locally.
+Enable with `--enable-feature=agent`.
+
+Learn more about the Prometheus Agent in our [blog post](https://prometheus.io/blog/2021/11/16/agent/).
+
+* [CHANGE] Remote-write: Change default max retry time from 100ms to 5 seconds. #9634
+* [FEATURE] Agent: New mode of operation optimized for remote-write only scenarios, without local storage. Enable with `--enable-feature=agent`. #8785 #9851 #9664 #9939 #9941 #9943
+* [FEATURE] Promtool: Add `promtool check service-discovery` command. #8970
+* [FEATURE] UI: Add search in metrics dropdown. #9629
+* [FEATURE] Templates: Add parseDuration to template functions. #8817
+* [ENHANCEMENT] Promtool: Improve test output. #8064
+* [ENHANCEMENT] Promtool: Use kahan summation for better numerical stability. #9588
+* [ENHANCEMENT] Remote-write: Reuse memory for marshalling. #9412
+* [ENHANCEMENT] Scrape: Add `scrape_body_size_bytes` scrape metric behind the `--enable-feature=extra-scrape-metrics` flag. #9569
+* [ENHANCEMENT] TSDB: Add windows arm64 support. #9703
+* [ENHANCEMENT] TSDB: Optimize query by skipping unneeded sorting in TSDB. #9673
+* [ENHANCEMENT] Templates: Support int and uint as datatypes for template formatting. #9680
+* [ENHANCEMENT] UI: Prefer `rate` over `rad`, `delta` over `deg`, and `count` over `cos` in autocomplete. #9688
+* [ENHANCEMENT] Linode SD: Tune API request page sizes. #9779
+* [BUGFIX] TSDB: Add more size checks when writing individual sections in the index. #9710
+* [BUGFIX] PromQL: Make `deriv()` return zero values for constant series. #9728
+* [BUGFIX] TSDB: Fix panic when checkpoint directory is empty. #9687
+* [BUGFIX] TSDB: Fix panic, out of order chunks, and race warning during WAL replay. #9856
+* [BUGFIX] UI: Correctly render links for targets with IPv6 addresses that contain a Zone ID. #9853
+* [BUGFIX] Promtool: Fix checking of `authorization.credentials_file` and `bearer_token_file` fields. #9883
+* [BUGFIX] Uyuni SD: Fix null pointer exception during initialization. #9924 #9950
+* [BUGFIX] TSDB: Fix queries after a failed snapshot replay. #9980
+
 ## 2.31.2 / 2021-12-09
 
 * [BUGFIX] TSDB: Fix queries after a failed snapshot replay. #9980
@@ -32,6 +122,10 @@
 * [BUGFIX] TSDB: Fix memory leak in samples deletion. #9151
 * [BUGFIX] UI: Use consistent margin-bottom for all alert kinds. #9318
 
+## 2.30.4 / 2021-12-09
+
+* [BUGFIX] TSDB: Fix queries after a failed snapshot replay. #9980
+
 ## 2.30.3 / 2021-10-05
 
 * [BUGFIX] TSDB: Fix panic on failed snapshot replay. #9438
@@ -225,7 +319,7 @@ Alertmanager API v2 was released in Aler
 * [ENHANCEMENT] Mixins: Scope grafana configuration. #8332
 * [ENHANCEMENT] Kubernetes SD: Add endpoint labels metadata. #8273
 * [ENHANCEMENT] UI: Expose total number of label pairs in head in TSDB stats page. #8343
-* [ENHANCEMENT] TSDB: Reload blocks every minute, to detect new blocks and enforce retention more often. #8343
+* [ENHANCEMENT] TSDB: Reload blocks every minute, to detect new blocks and enforce retention more often. #8340
 * [BUGFIX] API: Fix global URL when external address has no port. #8359
 * [BUGFIX] Backfill: Fix error message handling. #8432
 * [BUGFIX] Backfill: Fix "add sample: out of bounds" error when series span an entire block. #8476
diff -pruN 2.31.2+ds1-1/.circleci/config.yml 2.33.5+ds1-2/.circleci/config.yml
--- 2.31.2+ds1-1/.circleci/config.yml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/.circleci/config.yml	2022-03-08 16:34:32.000000000 +0000
@@ -2,7 +2,7 @@
 version: 2.1
 
 orbs:
-  prometheus: prometheus/prometheus@0.11.0
+  prometheus: prometheus/prometheus@0.15.0
   go: circleci/go@1.7.0
   win: circleci/windows@2.3.0
 
@@ -12,9 +12,9 @@ executors:
   golang:
     docker:
       - image: quay.io/prometheus/golang-builder:1.17-base
-  golang_115:
+  golang_oldest:
     docker:
-      - image: quay.io/prometheus/golang-builder:1.15-base
+      - image: quay.io/prometheus/golang-builder:1.16-base
 
 jobs:
   test_go:
@@ -36,6 +36,7 @@ jobs:
             GOOPTS: "-p 2"
             GOMAXPROCS: "2"
             GO111MODULE: "on"
+      - run: go test ./tsdb/ -test.tsdb-isolation=false
       - prometheus/check_proto:
           version: "3.15.8"
       - prometheus/store_artifact:
@@ -88,32 +89,26 @@ jobs:
             GOGC: "20"
             GOOPTS: "-p 2"
 
-  test_tsdb_go115:
-    executor: golang_115
+  test_golang_oldest:
+    executor: golang_oldest
     steps:
       - checkout
+      - run: make build
       - run: go test ./tsdb/...
+      - run: go test ./tsdb/ -test.tsdb-isolation=false
 
   test_mixins:
     executor: golang
     steps:
       - checkout
       - run: go install ./cmd/promtool/.
-      - run:
-          command: go install -mod=readonly github.com/google/go-jsonnet/cmd/jsonnet github.com/google/go-jsonnet/cmd/jsonnetfmt github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
-          working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: make clean
-          working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: jb install
-          working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: make
-          working_directory: ~/project/documentation/prometheus-mixin
-      - run:
-          command: git diff --exit-code
-          working_directory: ~/project/documentation/prometheus-mixin
+      - run: go install github.com/google/go-jsonnet/cmd/jsonnet@latest
+      - run: go install github.com/google/go-jsonnet/cmd/jsonnetfmt@latest
+      - run: go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest
+      - run: make -C documentation/prometheus-mixin clean
+      - run: make -C documentation/prometheus-mixin jb_install
+      - run: make -C documentation/prometheus-mixin
+      - run: git diff --exit-code
 
   repo_sync:
     executor: golang
@@ -134,7 +129,7 @@ workflows:
           filters:
             tags:
               only: /.*/
-      - test_tsdb_go115:
+      - test_golang_oldest:
           filters:
             tags:
               only: /.*/
@@ -148,8 +143,19 @@ workflows:
               only: /.*/
       - prometheus/build:
           name: build
+          parallelism: 3
+          promu_opts: "-p linux/amd64 -p windows/amd64 -p linux/arm64 -p darwin/amd64 -p darwin/arm64 -p linux/386"
+          filters:
+            tags:
+              ignore: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
+            branches:
+              ignore: /^(main|release-.*|.*build-all.*)$/
+      - prometheus/build:
+          name: build_all
           parallelism: 12
           filters:
+            branches:
+              only: /^(main|release-.*|.*build-all.*)$/
             tags:
               only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
       - prometheus/publish_main:
@@ -157,7 +163,7 @@ workflows:
           requires:
             - test_go
             - test_ui
-            - build
+            - build_all
           filters:
             branches:
               only: main
@@ -167,7 +173,7 @@ workflows:
           requires:
             - test_go
             - test_ui
-            - build
+            - build_all
           filters:
             tags:
               only: /^v[0-9]+(\.[0-9]+){2}(-.+|[^-.]*)$/
diff -pruN 2.31.2+ds1-1/cmd/prometheus/main.go 2.33.5+ds1-2/cmd/prometheus/main.go
--- 2.31.2+ds1-1/cmd/prometheus/main.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/prometheus/main.go	2022-03-08 16:34:32.000000000 +0000
@@ -60,23 +60,26 @@ import (
 	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
 	"github.com/prometheus/prometheus/discovery/legacymanager"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
 	"github.com/prometheus/prometheus/notifier"
-	"github.com/prometheus/prometheus/pkg/exemplar"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/logging"
-	"github.com/prometheus/prometheus/pkg/relabel"
-	prom_runtime "github.com/prometheus/prometheus/pkg/runtime"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/scrape"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/storage/remote"
 	"github.com/prometheus/prometheus/tsdb"
+	"github.com/prometheus/prometheus/tsdb/agent"
+	"github.com/prometheus/prometheus/util/logging"
+	prom_runtime "github.com/prometheus/prometheus/util/runtime"
 	"github.com/prometheus/prometheus/util/strutil"
 	"github.com/prometheus/prometheus/web"
 )
 
 var (
+	appName = "prometheus"
+
 	configSuccess = prometheus.NewGauge(prometheus.GaugeOpts{
 		Name: "prometheus_config_last_reload_successful",
 		Help: "Whether the last configuration reload attempt was successful.",
@@ -88,10 +91,13 @@ var (
 
 	defaultRetentionString   = "15d"
 	defaultRetentionDuration model.Duration
+
+	agentMode                       bool
+	agentOnlyFlags, serverOnlyFlags []string
 )
 
 func init() {
-	prometheus.MustRegister(version.NewCollector("prometheus"))
+	prometheus.MustRegister(version.NewCollector(strings.ReplaceAll(appName, "-", "_")))
 
 	var err error
 	defaultRetentionDuration, err = model.ParseDuration(defaultRetentionString)
@@ -100,10 +106,31 @@ func init() {
 	}
 }
 
+// serverOnlyFlag creates server-only kingpin flag.
+func serverOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
+	return app.Flag(name, fmt.Sprintf("%s Use with server mode only.", help)).
+		PreAction(func(parseContext *kingpin.ParseContext) error {
+			// This will be invoked only if flag is actually provided by user.
+			serverOnlyFlags = append(serverOnlyFlags, "--"+name)
+			return nil
+		})
+}
+
+// agentOnlyFlag creates agent-only kingpin flag.
+func agentOnlyFlag(app *kingpin.Application, name, help string) *kingpin.FlagClause {
+	return app.Flag(name, fmt.Sprintf("%s Use with agent mode only.", help)).
+		PreAction(func(parseContext *kingpin.ParseContext) error {
+			// This will be invoked only if flag is actually provided by user.
+			agentOnlyFlags = append(agentOnlyFlags, "--"+name)
+			return nil
+		})
+}
+
 type flagConfig struct {
 	configFile string
 
-	localStoragePath    string
+	agentStoragePath    string
+	serverStoragePath   string
 	notifier            notifier.Options
 	forGracePeriod      model.Duration
 	outageTolerance     model.Duration
@@ -111,6 +138,7 @@ type flagConfig struct {
 	web                 web.Options
 	scrape              scrape.Options
 	tsdb                tsdbOptions
+	agent               agentOptions
 	lookbackDelta       model.Duration
 	webTimeout          model.Duration
 	queryTimeout        model.Duration
@@ -121,8 +149,6 @@ type flagConfig struct {
 	featureList []string
 	// These options are extracted from featureList
 	// for ease of use.
-	enablePromQLAtModifier     bool
-	enablePromQLNegativeOffset bool
 	enableExpandExternalLabels bool
 	enableNewSDManager         bool
 
@@ -138,15 +164,9 @@ func (c *flagConfig) setFeatureListOptio
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
-			case "promql-at-modifier":
-				c.enablePromQLAtModifier = true
-				level.Info(logger).Log("msg", "Experimental promql-at-modifier enabled")
-			case "promql-negative-offset":
-				c.enablePromQLNegativeOffset = true
-				level.Info(logger).Log("msg", "Experimental promql-negative-offset enabled")
 			case "remote-write-receiver":
-				c.web.RemoteWriteReceiver = true
-				level.Info(logger).Log("msg", "Experimental remote-write-receiver enabled")
+				c.web.EnableRemoteWriteReceiver = true
+				level.Warn(logger).Log("msg", "Remote write receiver enabled via feature flag remote-write-receiver. This is DEPRECATED. Use --web.enable-remote-write-receiver.")
 			case "expand-external-labels":
 				c.enableExpandExternalLabels = true
 				level.Info(logger).Log("msg", "Experimental expand-external-labels enabled")
@@ -162,8 +182,13 @@ func (c *flagConfig) setFeatureListOptio
 			case "new-service-discovery-manager":
 				c.enableNewSDManager = true
 				level.Info(logger).Log("msg", "Experimental service discovery manager")
+			case "agent":
+				agentMode = true
+				level.Info(logger).Log("msg", "Experimental agent mode enabled.")
 			case "":
 				continue
+			case "promql-at-modifier", "promql-negative-offset":
+				level.Warn(logger).Log("msg", "This option for --enable-feature is now permanently enabled and therefore a no-op.", "option", o)
 			default:
 				level.Warn(logger).Log("msg", "Unknown option for --enable-feature", "option", o)
 			}
@@ -196,7 +221,7 @@ func main() {
 
 	a := kingpin.New(filepath.Base(os.Args[0]), "The Prometheus monitoring server").UsageWriter(os.Stdout)
 
-	a.Version(version.Print("prometheus"))
+	a.Version(version.Print(appName))
 
 	a.HelpFlag.Short('h')
 
@@ -232,6 +257,9 @@ func main() {
 	a.Flag("web.enable-admin-api", "Enable API endpoints for admin control actions.").
 		Default("false").BoolVar(&cfg.web.EnableAdminAPI)
 
+	a.Flag("web.enable-remote-write-receiver", "Enable API endpoint accepting remote write requests.").
+		Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)
+
 	a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
 		Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
 
@@ -244,61 +272,86 @@ func main() {
 	a.Flag("web.cors.origin", `Regex for CORS origin. It is fully anchored. Example: 'https?://(domain1|domain2)\.com'`).
 		Default(".*").StringVar(&cfg.corsRegexString)
 
-	a.Flag("storage.tsdb.path", "Base path for metrics storage.").
-		Default("data/").StringVar(&cfg.localStoragePath)
+	serverOnlyFlag(a, "storage.tsdb.path", "Base path for metrics storage.").
+		Default("data/").StringVar(&cfg.serverStoragePath)
 
-	a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
+	serverOnlyFlag(a, "storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
 		Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration)
 
-	a.Flag("storage.tsdb.max-block-duration",
+	serverOnlyFlag(a, "storage.tsdb.max-block-duration",
 		"Maximum duration compacted blocks may span. For use in testing. (Defaults to 10% of the retention period.)").
 		Hidden().PlaceHolder("<duration>").SetValue(&cfg.tsdb.MaxBlockDuration)
 
-	a.Flag("storage.tsdb.max-block-chunk-segment-size",
+	serverOnlyFlag(a, "storage.tsdb.max-block-chunk-segment-size",
 		"The maximum size for a single chunk segment in a block. Example: 512MB").
 		Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.MaxBlockChunkSegmentSize)
 
-	a.Flag("storage.tsdb.wal-segment-size",
+	serverOnlyFlag(a, "storage.tsdb.wal-segment-size",
 		"Size at which to split the tsdb WAL segment files. Example: 100MB").
 		Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.tsdb.WALSegmentSize)
 
-	a.Flag("storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
+	serverOnlyFlag(a, "storage.tsdb.retention", "[DEPRECATED] How long to retain samples in storage. This flag has been deprecated, use \"storage.tsdb.retention.time\" instead.").
 		SetValue(&oldFlagRetentionDuration)
 
-	a.Flag("storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
+	serverOnlyFlag(a, "storage.tsdb.retention.time", "How long to retain samples in storage. When this flag is set it overrides \"storage.tsdb.retention\". If neither this flag nor \"storage.tsdb.retention\" nor \"storage.tsdb.retention.size\" is set, the retention time defaults to "+defaultRetentionString+". Units Supported: y, w, d, h, m, s, ms.").
 		SetValue(&newFlagRetentionDuration)
 
-	a.Flag("storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
+	serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
 		BytesVar(&cfg.tsdb.MaxBytes)
 
-	a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
+	serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
 		Default("false").BoolVar(&cfg.tsdb.NoLockfile)
 
-	a.Flag("storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
+	serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
 		Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks)
 
-	a.Flag("storage.tsdb.wal-compression", "Compress the tsdb WAL.").
+	serverOnlyFlag(a, "storage.tsdb.wal-compression", "Compress the tsdb WAL.").
 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
 
+	agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
+		Default("data-agent/").StringVar(&cfg.agentStoragePath)
+
+	agentOnlyFlag(a, "storage.agent.wal-segment-size",
+		"Size at which to split WAL segment files. Example: 100MB").
+		Hidden().PlaceHolder("<bytes>").BytesVar(&cfg.agent.WALSegmentSize)
+
+	agentOnlyFlag(a, "storage.agent.wal-compression", "Compress the agent WAL.").
+		Default("true").BoolVar(&cfg.agent.WALCompression)
+
+	agentOnlyFlag(a, "storage.agent.wal-truncate-frequency",
+		"The frequency at which to truncate the WAL and remove old data.").
+		Hidden().PlaceHolder("<duration>").SetValue(&cfg.agent.TruncateFrequency)
+
+	agentOnlyFlag(a, "storage.agent.retention.min-time",
+		"Minimum age samples may be before being considered for deletion when the WAL is truncated").
+		SetValue(&cfg.agent.MinWALTime)
+
+	agentOnlyFlag(a, "storage.agent.retention.max-time",
+		"Maximum age samples may be before being forcibly deleted when the WAL is truncated").
+		SetValue(&cfg.agent.MaxWALTime)
+
+	agentOnlyFlag(a, "storage.agent.no-lockfile", "Do not create lockfile in data directory.").
+		Default("false").BoolVar(&cfg.agent.NoLockfile)
+
 	a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
 		Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)
 
-	a.Flag("storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types.").
+	serverOnlyFlag(a, "storage.remote.read-sample-limit", "Maximum overall number of samples to return via the remote read interface, in a single query. 0 means no limit. This limit is ignored for streamed response types.").
 		Default("5e7").IntVar(&cfg.web.RemoteReadSampleLimit)
 
-	a.Flag("storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit.").
+	serverOnlyFlag(a, "storage.remote.read-concurrent-limit", "Maximum number of concurrent remote read calls. 0 means no limit.").
 		Default("10").IntVar(&cfg.web.RemoteReadConcurrencyLimit)
 
-	a.Flag("storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default.").
+	serverOnlyFlag(a, "storage.remote.read-max-bytes-in-frame", "Maximum number of bytes in a single frame for streaming remote read response types before marshalling. Note that client might have limit on frame size as well. 1MB as recommended by protobuf by default.").
 		Default("1048576").IntVar(&cfg.web.RemoteReadBytesInFrame)
 
-	a.Flag("rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert.").
+	serverOnlyFlag(a, "rules.alert.for-outage-tolerance", "Max time to tolerate prometheus outage for restoring \"for\" state of alert.").
 		Default("1h").SetValue(&cfg.outageTolerance)
 
-	a.Flag("rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period.").
+	serverOnlyFlag(a, "rules.alert.for-grace-period", "Minimum duration between alert and restored \"for\" state. This is maintained only for alerts with configured \"for\" time greater than grace period.").
 		Default("10m").SetValue(&cfg.forGracePeriod)
 
-	a.Flag("rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
+	serverOnlyFlag(a, "rules.alert.resend-delay", "Minimum amount of time to wait before resending an alert to Alertmanager.").
 		Default("1m").SetValue(&cfg.resendDelay)
 
 	a.Flag("scrape.adjust-timestamps", "Adjust scrape timestamps by up to `scrape.timestamp-tolerance` to align them to the intended schedule. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
@@ -307,25 +360,25 @@ func main() {
 	a.Flag("scrape.timestamp-tolerance", "Timestamp tolerance. See https://github.com/prometheus/prometheus/issues/7846 for more context. Experimental. This flag will be removed in a future release.").
 		Hidden().Default("2ms").DurationVar(&scrape.ScrapeTimestampTolerance)
 
-	a.Flag("alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
+	serverOnlyFlag(a, "alertmanager.notification-queue-capacity", "The capacity of the queue for pending Alertmanager notifications.").
 		Default("10000").IntVar(&cfg.notifier.QueueCapacity)
 
 	// TODO: Remove in Prometheus 3.0.
 	alertmanagerTimeout := a.Flag("alertmanager.timeout", "[DEPRECATED] This flag has no effect.").Hidden().String()
 
-	a.Flag("query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
+	serverOnlyFlag(a, "query.lookback-delta", "The maximum lookback duration for retrieving metrics during expression evaluations and federation.").
 		Default("5m").SetValue(&cfg.lookbackDelta)
 
-	a.Flag("query.timeout", "Maximum time a query may take before being aborted.").
+	serverOnlyFlag(a, "query.timeout", "Maximum time a query may take before being aborted.").
 		Default("2m").SetValue(&cfg.queryTimeout)
 
-	a.Flag("query.max-concurrency", "Maximum number of queries executed concurrently.").
+	serverOnlyFlag(a, "query.max-concurrency", "Maximum number of queries executed concurrently.").
 		Default("20").IntVar(&cfg.queryConcurrency)
 
-	a.Flag("query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
+	serverOnlyFlag(a, "query.max-samples", "Maximum number of samples a single query can load into memory. Note that queries will fail if they try to load more samples than this into memory, so this also limits the number of samples a query can return.").
 		Default("50000000").IntVar(&cfg.queryMaxSamples)
 
-	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver, extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
+	a.Flag("enable-feature", "Comma separated feature names to enable. Valid options: agent, exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset, remote-write-receiver (DEPRECATED), extra-scrape-metrics, new-service-discovery-manager. See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more details.").
 		Default("").StringsVar(&cfg.featureList)
 
 	promlogflag.AddFlags(a, &cfg.promlogConfig)
@@ -344,6 +397,21 @@ func main() {
 		os.Exit(1)
 	}
 
+	if agentMode && len(serverOnlyFlags) > 0 {
+		fmt.Fprintf(os.Stderr, "The following flag(s) can not be used in agent mode: %q", serverOnlyFlags)
+		os.Exit(3)
+	}
+
+	if !agentMode && len(agentOnlyFlags) > 0 {
+		fmt.Fprintf(os.Stderr, "The following flag(s) can only be used in agent mode: %q", agentOnlyFlags)
+		os.Exit(3)
+	}
+
+	localStoragePath := cfg.serverStoragePath
+	if agentMode {
+		localStoragePath = cfg.agentStoragePath
+	}
+
 	cfg.web.ExternalURL, err = computeExternalURL(cfg.prometheusURL, cfg.web.ListenAddress)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, errors.Wrapf(err, "parse external URL %q", cfg.prometheusURL))
@@ -362,7 +430,7 @@ func main() {
 
 	// Throw error for invalid config before starting other components.
 	var cfgFile *config.Config
-	if cfgFile, err = config.LoadFile(cfg.configFile, false, log.NewNopLogger()); err != nil {
+	if cfgFile, err = config.LoadFile(cfg.configFile, agentMode, false, log.NewNopLogger()); err != nil {
 		level.Error(logger).Log("msg", fmt.Sprintf("Error loading config (--config.file=%s)", cfg.configFile), "err", err)
 		os.Exit(2)
 	}
@@ -390,7 +458,8 @@ func main() {
 	// RoutePrefix must always be at least '/'.
 	cfg.web.RoutePrefix = "/" + strings.Trim(cfg.web.RoutePrefix, "/")
 
-	{ // Time retention settings.
+	if !agentMode {
+		// Time retention settings.
 		if oldFlagRetentionDuration != 0 {
 			level.Warn(logger).Log("deprecation_notice", "'storage.tsdb.retention' flag is deprecated use 'storage.tsdb.retention.time' instead.")
 			cfg.tsdb.RetentionDuration = oldFlagRetentionDuration
@@ -415,9 +484,8 @@ func main() {
 			cfg.tsdb.RetentionDuration = y
 			level.Warn(logger).Log("msg", "Time retention value is too high. Limiting to: "+y.String())
 		}
-	}
 
-	{ // Max block size  settings.
+		// Max block size settings.
 		if cfg.tsdb.MaxBlockDuration == 0 {
 			maxBlockDuration, err := model.ParseDuration("31d")
 			if err != nil {
@@ -454,7 +522,7 @@ func main() {
 	var (
 		localStorage  = &readyStorage{stats: tsdb.NewDBStats()}
 		scraper       = &readyScrapeManager{}
-		remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, cfg.localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
+		remoteStorage = remote.NewStorage(log.With(logger, "component", "remote"), prometheus.DefaultRegisterer, localStorage.StartTime, localStoragePath, time.Duration(cfg.RemoteFlushDeadline), scraper)
 		fanoutStorage = storage.NewFanout(logger, localStorage, remoteStorage)
 	)
 
@@ -483,16 +551,23 @@ func main() {
 	var (
 		scrapeManager = scrape.NewManager(&cfg.scrape, log.With(logger, "component", "scrape manager"), fanoutStorage)
 
-		opts = promql.EngineOpts{
+		queryEngine *promql.Engine
+		ruleManager *rules.Manager
+	)
+
+	if !agentMode {
+		opts := promql.EngineOpts{
 			Logger:                   log.With(logger, "component", "query engine"),
 			Reg:                      prometheus.DefaultRegisterer,
 			MaxSamples:               cfg.queryMaxSamples,
 			Timeout:                  time.Duration(cfg.queryTimeout),
-			ActiveQueryTracker:       promql.NewActiveQueryTracker(cfg.localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")),
+			ActiveQueryTracker:       promql.NewActiveQueryTracker(localStoragePath, cfg.queryConcurrency, log.With(logger, "component", "activeQueryTracker")),
 			LookbackDelta:            time.Duration(cfg.lookbackDelta),
 			NoStepSubqueryIntervalFn: noStepSubqueryInterval.Get,
-			EnableAtModifier:         cfg.enablePromQLAtModifier,
-			EnableNegativeOffset:     cfg.enablePromQLNegativeOffset,
+			// EnableAtModifier and EnableNegativeOffset have to be
+			// always on for regular PromQL as of Prometheus v2.33.
+			EnableAtModifier:     true,
+			EnableNegativeOffset: true,
 		}
 
 		queryEngine = promql.NewEngine(opts)
@@ -510,14 +585,14 @@ func main() {
 			ForGracePeriod:  time.Duration(cfg.forGracePeriod),
 			ResendDelay:     time.Duration(cfg.resendDelay),
 		})
-	)
+	}
 
 	scraper.Set(scrapeManager)
 
 	cfg.web.Context = ctxWeb
 	cfg.web.TSDBRetentionDuration = cfg.tsdb.RetentionDuration
 	cfg.web.TSDBMaxBytes = cfg.tsdb.MaxBytes
-	cfg.web.TSDBDir = cfg.localStoragePath
+	cfg.web.TSDBDir = localStoragePath
 	cfg.web.LocalStorage = localStorage
 	cfg.web.Storage = fanoutStorage
 	cfg.web.ExemplarStorage = localStorage
@@ -526,6 +601,7 @@ func main() {
 	cfg.web.RuleManager = ruleManager
 	cfg.web.Notifier = notifierManager
 	cfg.web.LookbackDelta = time.Duration(cfg.lookbackDelta)
+	cfg.web.IsAgent = agentMode
 
 	cfg.web.Version = &web.PrometheusVersion{
 		Version:   version.Version,
@@ -557,7 +633,7 @@ func main() {
 	)
 
 	// This is passed to ruleManager.Update().
-	var externalURL = cfg.web.ExternalURL.String()
+	externalURL := cfg.web.ExternalURL.String()
 
 	reloaders := []reloader{
 		{
@@ -572,6 +648,11 @@ func main() {
 		}, {
 			name: "query_engine",
 			reloader: func(cfg *config.Config) error {
+				if agentMode {
+					// No-op in Agent mode.
+					return nil
+				}
+
 				if cfg.GlobalConfig.QueryLogFile == "" {
 					queryEngine.SetQueryLogger(nil)
 					return nil
@@ -613,6 +694,11 @@ func main() {
 		}, {
 			name: "rules",
 			reloader: func(cfg *config.Config) error {
+				if agentMode {
+					// No-op in Agent mode
+					return nil
+				}
+
 				// Get all rule files matching the configuration paths.
 				var files []string
 				for _, pat := range cfg.RuleFiles {
@@ -779,7 +865,6 @@ func main() {
 						return nil
 					}
 				}
-
 			},
 			func(err error) {
 				// Wait for any in-progress reloads to complete to avoid
@@ -817,7 +902,7 @@ func main() {
 			},
 		)
 	}
-	{
+	if !agentMode {
 		// Rule manager.
 		g.Add(
 			func() error {
@@ -829,8 +914,7 @@ func main() {
 				ruleManager.Stop()
 			},
 		)
-	}
-	{
+
 		// TSDB.
 		opts := cfg.tsdb.ToTSDBOptions()
 		cancel := make(chan struct{})
@@ -848,18 +932,12 @@ func main() {
 					}
 				}
 
-				db, err := openDBWithMetrics(
-					cfg.localStoragePath,
-					logger,
-					prometheus.DefaultRegisterer,
-					&opts,
-					localStorage.getStats(),
-				)
+				db, err := openDBWithMetrics(localStoragePath, logger, prometheus.DefaultRegisterer, &opts, localStorage.getStats())
 				if err != nil {
 					return errors.Wrapf(err, "opening storage failed")
 				}
 
-				switch fsType := prom_runtime.Statfs(cfg.localStoragePath); fsType {
+				switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
 				case "NFS_SUPER_MAGIC":
 					level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
 				default:
@@ -892,6 +970,59 @@ func main() {
 			},
 		)
 	}
+	if agentMode {
+		// WAL storage.
+		opts := cfg.agent.ToAgentOptions()
+		cancel := make(chan struct{})
+		g.Add(
+			func() error {
+				level.Info(logger).Log("msg", "Starting WAL storage ...")
+				if cfg.agent.WALSegmentSize != 0 {
+					if cfg.agent.WALSegmentSize < 10*1024*1024 || cfg.agent.WALSegmentSize > 256*1024*1024 {
+						return errors.New("flag 'storage.agent.wal-segment-size' must be set between 10MB and 256MB")
+					}
+				}
+				db, err := agent.Open(
+					logger,
+					prometheus.DefaultRegisterer,
+					remoteStorage,
+					localStoragePath,
+					&opts,
+				)
+				if err != nil {
+					return errors.Wrap(err, "opening storage failed")
+				}
+
+				switch fsType := prom_runtime.Statfs(localStoragePath); fsType {
+				case "NFS_SUPER_MAGIC":
+					level.Warn(logger).Log("fs_type", fsType, "msg", "This filesystem is not supported and may lead to data corruption and data loss. Please carefully read https://prometheus.io/docs/prometheus/latest/storage/ to learn more about supported filesystems.")
+				default:
+					level.Info(logger).Log("fs_type", fsType)
+				}
+
+				level.Info(logger).Log("msg", "Agent WAL storage started")
+				level.Debug(logger).Log("msg", "Agent WAL storage options",
+					"WALSegmentSize", cfg.agent.WALSegmentSize,
+					"WALCompression", cfg.agent.WALCompression,
+					"StripeSize", cfg.agent.StripeSize,
+					"TruncateFrequency", cfg.agent.TruncateFrequency,
+					"MinWALTime", cfg.agent.MinWALTime,
+					"MaxWALTime", cfg.agent.MaxWALTime,
+				)
+
+				localStorage.Set(db, 0)
+				close(dbOpen)
+				<-cancel
+				return nil
+			},
+			func(e error) {
+				if err := fanoutStorage.Close(); err != nil {
+					level.Error(logger).Log("msg", "Error stopping storage", "err", err)
+				}
+				close(cancel)
+			},
+		)
+	}
 	{
 		// Web handler.
 		g.Add(
@@ -977,6 +1108,7 @@ type safePromQLNoStepSubqueryInterval st
 func durationToInt64Millis(d time.Duration) int64 {
 	return int64(d / time.Millisecond)
 }
+
 func (i *safePromQLNoStepSubqueryInterval) Set(ev model.Duration) {
 	i.value.Store(durationToInt64Millis(time.Duration(ev)))
 }
@@ -990,7 +1122,7 @@ type reloader struct {
 	reloader func(*config.Config) error
 }
 
-func reloadConfig(filename string, expandExternalLabels bool, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
+func reloadConfig(filename string, expandExternalLabels, enableExemplarStorage bool, logger log.Logger, noStepSuqueryInterval *safePromQLNoStepSubqueryInterval, rls ...reloader) (err error) {
 	start := time.Now()
 	timings := []interface{}{}
 	level.Info(logger).Log("msg", "Loading configuration file", "filename", filename)
@@ -1004,7 +1136,7 @@ func reloadConfig(filename string, expan
 		}
 	}()
 
-	conf, err := config.LoadFile(filename, expandExternalLabels, logger)
+	conf, err := config.LoadFile(filename, agentMode, expandExternalLabels, logger)
 	if err != nil {
 		return errors.Wrapf(err, "couldn't load configuration (--config.file=%q)", filename)
 	}
@@ -1115,18 +1247,21 @@ func sendAlerts(s sender, externalURL st
 // storage at a later point in time.
 type readyStorage struct {
 	mtx             sync.RWMutex
-	db              *tsdb.DB
+	db              storage.Storage
 	startTimeMargin int64
 	stats           *tsdb.DBStats
 }
 
 func (s *readyStorage) ApplyConfig(conf *config.Config) error {
 	db := s.get()
-	return db.ApplyConfig(conf)
+	if db, ok := db.(*tsdb.DB); ok {
+		return db.ApplyConfig(conf)
+	}
+	return nil
 }
 
 // Set the storage.
-func (s *readyStorage) Set(db *tsdb.DB, startTimeMargin int64) {
+func (s *readyStorage) Set(db storage.Storage, startTimeMargin int64) {
 	s.mtx.Lock()
 	defer s.mtx.Unlock()
 
@@ -1134,7 +1269,7 @@ func (s *readyStorage) Set(db *tsdb.DB,
 	s.startTimeMargin = startTimeMargin
 }
 
-func (s *readyStorage) get() *tsdb.DB {
+func (s *readyStorage) get() storage.Storage {
 	s.mtx.RLock()
 	x := s.db
 	s.mtx.RUnlock()
@@ -1151,15 +1286,21 @@ func (s *readyStorage) getStats() *tsdb.
 // StartTime implements the Storage interface.
 func (s *readyStorage) StartTime() (int64, error) {
 	if x := s.get(); x != nil {
-		var startTime int64
-
-		if len(x.Blocks()) > 0 {
-			startTime = x.Blocks()[0].Meta().MinTime
-		} else {
-			startTime = time.Now().Unix() * 1000
+		switch db := x.(type) {
+		case *tsdb.DB:
+			var startTime int64
+			if len(db.Blocks()) > 0 {
+				startTime = db.Blocks()[0].Meta().MinTime
+			} else {
+				startTime = time.Now().Unix() * 1000
+			}
+			// Add a safety margin as it may take a few minutes for everything to spin up.
+			return startTime + s.startTimeMargin, nil
+		case *agent.DB:
+			return db.StartTime()
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
 		}
-		// Add a safety margin as it may take a few minutes for everything to spin up.
-		return startTime + s.startTimeMargin, nil
 	}
 
 	return math.MaxInt64, tsdb.ErrNotReady
@@ -1183,7 +1324,14 @@ func (s *readyStorage) ChunkQuerier(ctx
 
 func (s *readyStorage) ExemplarQuerier(ctx context.Context) (storage.ExemplarQuerier, error) {
 	if x := s.get(); x != nil {
-		return x.ExemplarQuerier(ctx)
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.ExemplarQuerier(ctx)
+		case *agent.DB:
+			return nil, agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return nil, tsdb.ErrNotReady
 }
@@ -1198,11 +1346,11 @@ func (s *readyStorage) Appender(ctx cont
 
 type notReadyAppender struct{}
 
-func (n notReadyAppender) Append(ref uint64, l labels.Labels, t int64, v float64) (uint64, error) {
+func (n notReadyAppender) Append(ref storage.SeriesRef, l labels.Labels, t int64, v float64) (storage.SeriesRef, error) {
 	return 0, tsdb.ErrNotReady
 }
 
-func (n notReadyAppender) AppendExemplar(ref uint64, l labels.Labels, e exemplar.Exemplar) (uint64, error) {
+func (n notReadyAppender) AppendExemplar(ref storage.SeriesRef, l labels.Labels, e exemplar.Exemplar) (storage.SeriesRef, error) {
 	return 0, tsdb.ErrNotReady
 }
 
@@ -1221,7 +1369,14 @@ func (s *readyStorage) Close() error {
 // CleanTombstones implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
 func (s *readyStorage) CleanTombstones() error {
 	if x := s.get(); x != nil {
-		return x.CleanTombstones()
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.CleanTombstones()
+		case *agent.DB:
+			return agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return tsdb.ErrNotReady
 }
@@ -1229,7 +1384,14 @@ func (s *readyStorage) CleanTombstones()
 // Delete implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
 func (s *readyStorage) Delete(mint, maxt int64, ms ...*labels.Matcher) error {
 	if x := s.get(); x != nil {
-		return x.Delete(mint, maxt, ms...)
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.Delete(mint, maxt, ms...)
+		case *agent.DB:
+			return agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return tsdb.ErrNotReady
 }
@@ -1237,7 +1399,14 @@ func (s *readyStorage) Delete(mint, maxt
 // Snapshot implements the api_v1.TSDBAdminStats and api_v2.TSDBAdmin interfaces.
 func (s *readyStorage) Snapshot(dir string, withHead bool) error {
 	if x := s.get(); x != nil {
-		return x.Snapshot(dir, withHead)
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.Snapshot(dir, withHead)
+		case *agent.DB:
+			return agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return tsdb.ErrNotReady
 }
@@ -1245,7 +1414,14 @@ func (s *readyStorage) Snapshot(dir stri
 // Stats implements the api_v1.TSDBAdminStats interface.
 func (s *readyStorage) Stats(statsByLabelName string) (*tsdb.Stats, error) {
 	if x := s.get(); x != nil {
-		return x.Head().Stats(statsByLabelName), nil
+		switch db := x.(type) {
+		case *tsdb.DB:
+			return db.Head().Stats(statsByLabelName), nil
+		case *agent.DB:
+			return nil, agent.ErrUnsupported
+		default:
+			panic(fmt.Sprintf("unknown storage type %T", db))
+		}
 	}
 	return nil, tsdb.ErrNotReady
 }
@@ -1323,6 +1499,29 @@ func (opts tsdbOptions) ToTSDBOptions()
 	}
 }
 
+// agentOptions is a version of agent.Options with defined units. This is required
+// as agent.Option fields are unit agnostic (time).
+type agentOptions struct {
+	WALSegmentSize         units.Base2Bytes
+	WALCompression         bool
+	StripeSize             int
+	TruncateFrequency      model.Duration
+	MinWALTime, MaxWALTime model.Duration
+	NoLockfile             bool
+}
+
+func (opts agentOptions) ToAgentOptions() agent.Options {
+	return agent.Options{
+		WALSegmentSize:    int(opts.WALSegmentSize),
+		WALCompression:    opts.WALCompression,
+		StripeSize:        opts.StripeSize,
+		TruncateFrequency: time.Duration(opts.TruncateFrequency),
+		MinWALTime:        durationToInt64Millis(time.Duration(opts.MinWALTime)),
+		MaxWALTime:        durationToInt64Millis(time.Duration(opts.MaxWALTime)),
+		NoLockfile:        opts.NoLockfile,
+	}
+}
+
 func initTracing(logger log.Logger) (io.Closer, error) {
 	// Set tracing configuration defaults.
 	cfg := &jcfg.Configuration{
diff -pruN 2.31.2+ds1-1/cmd/prometheus/main_test.go 2.33.5+ds1-2/cmd/prometheus/main_test.go
--- 2.31.2+ds1-1/cmd/prometheus/main_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/prometheus/main_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -14,6 +14,7 @@
 package main
 
 import (
+	"bytes"
 	"context"
 	"fmt"
 	"io/ioutil"
@@ -21,6 +22,7 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
+	"strings"
 	"syscall"
 	"testing"
 	"time"
@@ -30,14 +32,16 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/stretchr/testify/require"
 
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/notifier"
-	"github.com/prometheus/prometheus/pkg/labels"
 	"github.com/prometheus/prometheus/rules"
 )
 
-var promPath = os.Args[0]
-var promConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
-var promData = filepath.Join(os.TempDir(), "data")
+var (
+	promPath    = os.Args[0]
+	promConfig  = filepath.Join("..", "..", "documentation", "examples", "prometheus.yml")
+	agentConfig = filepath.Join("..", "..", "documentation", "examples", "prometheus-agent.yml")
+)
 
 func TestMain(m *testing.M) {
 	for i, arg := range os.Args {
@@ -52,7 +56,6 @@ func TestMain(m *testing.M) {
 	os.Setenv("no_proxy", "localhost,127.0.0.1,0.0.0.0,:")
 
 	exitCode := m.Run()
-	os.RemoveAll(promData)
 	os.Exit(exitCode)
 }
 
@@ -202,7 +205,7 @@ func TestWALSegmentSizeBounds(t *testing
 	}
 
 	for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} {
-		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
+		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
 
 		// Log stderr in case of failure.
 		stderr, err := prom.StderrPipe()
@@ -223,6 +226,7 @@ func TestWALSegmentSizeBounds(t *testing
 				t.Errorf("prometheus should be still running: %v", err)
 			case <-time.After(5 * time.Second):
 				prom.Process.Kill()
+				<-done
 			}
 			continue
 		}
@@ -239,12 +243,14 @@ func TestWALSegmentSizeBounds(t *testing
 }
 
 func TestMaxBlockChunkSegmentSizeBounds(t *testing.T) {
+	t.Parallel()
+
 	if testing.Short() {
 		t.Skip("skipping test in short mode.")
 	}
 
 	for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} {
-		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
+		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig, "--storage.tsdb.path="+filepath.Join(t.TempDir(), "data"))
 
 		// Log stderr in case of failure.
 		stderr, err := prom.StderrPipe()
@@ -265,6 +271,7 @@ func TestMaxBlockChunkSegmentSizeBounds(
 				t.Errorf("prometheus should be still running: %v", err)
 			case <-time.After(5 * time.Second):
 				prom.Process.Kill()
+				<-done
 			}
 			continue
 		}
@@ -281,12 +288,7 @@ func TestMaxBlockChunkSegmentSizeBounds(
 }
 
 func TestTimeMetrics(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "time_metrics_e2e")
-	require.NoError(t, err)
-
-	defer func() {
-		require.NoError(t, os.RemoveAll(tmpDir))
-	}()
+	tmpDir := t.TempDir()
 
 	reg := prometheus.NewRegistry()
 	db, err := openDBWithMetrics(tmpDir, log.NewNopLogger(), reg, nil, nil)
@@ -347,3 +349,130 @@ func getCurrentGaugeValuesFor(t *testing
 	}
 	return res
 }
+
+func TestAgentSuccessfulStartup(t *testing.T) {
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig)
+	require.NoError(t, prom.Start())
+
+	actualExitStatus := 0
+	done := make(chan error, 1)
+
+	go func() { done <- prom.Wait() }()
+	select {
+	case err := <-done:
+		t.Logf("prometheus agent should be still running: %v", err)
+		actualExitStatus = prom.ProcessState.ExitCode()
+	case <-time.After(5 * time.Second):
+		prom.Process.Kill()
+	}
+	require.Equal(t, 0, actualExitStatus)
+}
+
+func TestAgentFailedStartupWithServerFlag(t *testing.T) {
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig)
+
+	output := bytes.Buffer{}
+	prom.Stderr = &output
+	require.NoError(t, prom.Start())
+
+	actualExitStatus := 0
+	done := make(chan error, 1)
+
+	go func() { done <- prom.Wait() }()
+	select {
+	case err := <-done:
+		t.Logf("prometheus agent should not be running: %v", err)
+		actualExitStatus = prom.ProcessState.ExitCode()
+	case <-time.After(5 * time.Second):
+		prom.Process.Kill()
+	}
+
+	require.Equal(t, 3, actualExitStatus)
+
+	// Assert on last line.
+	lines := strings.Split(output.String(), "\n")
+	last := lines[len(lines)-1]
+	require.Equal(t, "The following flag(s) can not be used in agent mode: [\"--storage.tsdb.path\"]", last)
+}
+
+func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
+	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig)
+	require.NoError(t, prom.Start())
+
+	actualExitStatus := 0
+	done := make(chan error, 1)
+
+	go func() { done <- prom.Wait() }()
+	select {
+	case err := <-done:
+		t.Logf("prometheus agent should not be running: %v", err)
+		actualExitStatus = prom.ProcessState.ExitCode()
+	case <-time.After(5 * time.Second):
+		prom.Process.Kill()
+	}
+	require.Equal(t, 2, actualExitStatus)
+}
+
+func TestModeSpecificFlags(t *testing.T) {
+	if testing.Short() {
+		t.Skip("skipping test in short mode.")
+	}
+
+	testcases := []struct {
+		mode       string
+		arg        string
+		exitStatus int
+	}{
+		{"agent", "--storage.agent.path", 0},
+		{"server", "--storage.tsdb.path", 0},
+		{"server", "--storage.agent.path", 3},
+		{"agent", "--storage.tsdb.path", 3},
+	}
+
+	for _, tc := range testcases {
+		t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
+			args := []string{"-test.main", tc.arg, t.TempDir()}
+
+			if tc.mode == "agent" {
+				args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
+			} else {
+				args = append(args, "--config.file="+promConfig)
+			}
+
+			prom := exec.Command(promPath, args...)
+
+			// Log stderr in case of failure.
+			stderr, err := prom.StderrPipe()
+			require.NoError(t, err)
+			go func() {
+				slurp, _ := ioutil.ReadAll(stderr)
+				t.Log(string(slurp))
+			}()
+
+			err = prom.Start()
+			require.NoError(t, err)
+
+			if tc.exitStatus == 0 {
+				done := make(chan error, 1)
+				go func() { done <- prom.Wait() }()
+				select {
+				case err := <-done:
+					t.Errorf("prometheus should be still running: %v", err)
+				case <-time.After(5 * time.Second):
+					prom.Process.Kill()
+					<-done
+				}
+				return
+			}
+
+			err = prom.Wait()
+			require.Error(t, err)
+			if exitError, ok := err.(*exec.ExitError); ok {
+				status := exitError.Sys().(syscall.WaitStatus)
+				require.Equal(t, tc.exitStatus, status.ExitStatus())
+			} else {
+				t.Errorf("unable to retrieve the exit status for prometheus: %v", err)
+			}
+		})
+	}
+}
diff -pruN 2.31.2+ds1-1/cmd/prometheus/main_unix_test.go 2.33.5+ds1-2/cmd/prometheus/main_unix_test.go
--- 2.31.2+ds1-1/cmd/prometheus/main_unix_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/prometheus/main_unix_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -17,11 +17,14 @@
 package main
 
 import (
+	"fmt"
 	"net/http"
 	"os"
 	"os/exec"
 	"testing"
 	"time"
+
+	"github.com/prometheus/prometheus/util/testutil"
 )
 
 // As soon as prometheus starts responding to http request it should be able to
@@ -31,11 +34,12 @@ func TestStartupInterrupt(t *testing.T)
 		t.Skip("skipping test in short mode.")
 	}
 
-	prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+promData)
+	port := fmt.Sprintf(":%d", testutil.RandomUnprivilegedPort(t))
+
+	prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+t.TempDir(), "--web.listen-address=0.0.0.0"+port)
 	err := prom.Start()
 	if err != nil {
-		t.Errorf("execution error: %v", err)
-		return
+		t.Fatalf("execution error: %v", err)
 	}
 
 	done := make(chan error, 1)
@@ -46,11 +50,13 @@ func TestStartupInterrupt(t *testing.T)
 	var startedOk bool
 	var stoppedErr error
 
+	url := "http://localhost" + port + "/graph"
+
 Loop:
 	for x := 0; x < 10; x++ {
 		// error=nil means prometheus has started so we can send the interrupt
 		// signal and wait for the graceful shutdown.
-		if _, err := http.Get("http://localhost:9090/graph"); err == nil {
+		if _, err := http.Get(url); err == nil {
 			startedOk = true
 			prom.Process.Signal(os.Interrupt)
 			select {
@@ -64,12 +70,11 @@ Loop:
 	}
 
 	if !startedOk {
-		t.Errorf("prometheus didn't start in the specified timeout")
-		return
+		t.Fatal("prometheus didn't start in the specified timeout")
 	}
 	if err := prom.Process.Kill(); err == nil {
 		t.Errorf("prometheus didn't shutdown gracefully after sending the Interrupt signal")
 	} else if stoppedErr != nil && stoppedErr.Error() != "signal: interrupt" { // TODO - find a better way to detect when the process didn't exit as expected!
-		t.Errorf("prometheus exited with an unexpected error:%v", stoppedErr)
+		t.Errorf("prometheus exited with an unexpected error: %v", stoppedErr)
 	}
 }
diff -pruN 2.31.2+ds1-1/cmd/prometheus/query_log_test.go 2.33.5+ds1-2/cmd/prometheus/query_log_test.go
--- 2.31.2+ds1-1/cmd/prometheus/query_log_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/prometheus/query_log_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -31,6 +31,8 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/util/testutil"
 )
 
 type origin int
@@ -246,11 +248,7 @@ func (p *queryLogTest) run(t *testing.T)
 		p.setQueryLog(t, "")
 	}
 
-	dir, err := ioutil.TempDir("", "query_log_test")
-	require.NoError(t, err)
-	defer func() {
-		require.NoError(t, os.RemoveAll(dir))
-	}()
+	dir := t.TempDir()
 
 	params := append([]string{
 		"-test.main",
@@ -412,7 +410,6 @@ func TestQueryLog(t *testing.T) {
 	cwd, err := os.Getwd()
 	require.NoError(t, err)
 
-	port := 15000
 	for _, host := range []string{"127.0.0.1", "[::1]"} {
 		for _, prefix := range []string{"", "/foobar"} {
 			for _, enabledAtStart := range []bool{true, false} {
@@ -422,7 +419,7 @@ func TestQueryLog(t *testing.T) {
 						host:           host,
 						enabledAtStart: enabledAtStart,
 						prefix:         prefix,
-						port:           port,
+						port:           testutil.RandomUnprivilegedPort(t),
 						cwd:            cwd,
 					}
 
diff -pruN 2.31.2+ds1-1/cmd/promtool/archive.go 2.33.5+ds1-2/cmd/promtool/archive.go
--- 2.31.2+ds1-1/cmd/promtool/archive.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/archive.go	2022-03-08 16:34:32.000000000 +0000
@@ -21,7 +21,7 @@ import (
 	"github.com/pkg/errors"
 )
 
-const filePerm = 0666
+const filePerm = 0o666
 
 type tarGzFileWriter struct {
 	tarWriter *tar.Writer
diff -pruN 2.31.2+ds1-1/cmd/promtool/backfill.go 2.33.5+ds1-2/cmd/promtool/backfill.go
--- 2.31.2+ds1-1/cmd/promtool/backfill.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/backfill.go	2022-03-08 16:34:32.000000000 +0000
@@ -21,8 +21,9 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/pkg/errors"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/textparse"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/textparse"
 	"github.com/prometheus/prometheus/tsdb"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
 )
@@ -105,7 +106,6 @@ func createBlocks(input []byte, mint, ma
 			// The next sample is not in this timerange, we can avoid parsing
 			// the file for this timerange.
 			continue
-
 		}
 		nextSampleTs = math.MaxInt64
 
@@ -207,13 +207,11 @@ func createBlocks(input []byte, mint, ma
 
 			return nil
 		}()
-
 		if err != nil {
 			return errors.Wrap(err, "process blocks")
 		}
 	}
 	return nil
-
 }
 
 func backfill(maxSamplesInAppender int, input []byte, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) (err error) {
diff -pruN 2.31.2+ds1-1/cmd/promtool/backfill_test.go 2.33.5+ds1-2/cmd/promtool/backfill_test.go
--- 2.31.2+ds1-1/cmd/promtool/backfill_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/backfill_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -15,17 +15,16 @@ package main
 
 import (
 	"context"
-	"io/ioutil"
 	"math"
-	"os"
 	"sort"
 	"testing"
 	"time"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
-	"github.com/stretchr/testify/require"
 )
 
 type backfillSample struct {
@@ -686,13 +685,9 @@ after_eof 1 2
 		t.Run(test.Description, func(t *testing.T) {
 			t.Logf("Test:%s", test.Description)
 
-			outputDir, err := ioutil.TempDir("", "myDir")
-			require.NoError(t, err)
-			defer func() {
-				require.NoError(t, os.RemoveAll(outputDir))
-			}()
+			outputDir := t.TempDir()
 
-			err = backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration)
+			err := backfill(test.MaxSamplesInAppender, []byte(test.ToParse), outputDir, false, false, test.MaxBlockDuration)
 
 			if !test.IsOk {
 				require.Error(t, err, test.Description)
diff -pruN 2.31.2+ds1-1/cmd/promtool/debug.go 2.33.5+ds1-2/cmd/promtool/debug.go
--- 2.31.2+ds1-1/cmd/promtool/debug.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/debug.go	2022-03-08 16:34:32.000000000 +0000
@@ -57,7 +57,6 @@ func debugWrite(cfg debugWriterConfig) e
 				return errors.Wrap(err, "error writing into the archive")
 			}
 		}
-
 	}
 
 	if err := archiver.close(); err != nil {
diff -pruN 2.31.2+ds1-1/cmd/promtool/main.go 2.33.5+ds1-2/cmd/promtool/main.go
--- 2.31.2+ds1-1/cmd/promtool/main.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/main.go	2022-03-08 16:34:32.000000000 +0000
@@ -18,6 +18,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/ioutil"
 	"math"
 	"net/http"
@@ -27,6 +28,7 @@ import (
 	"sort"
 	"strconv"
 	"strings"
+	"text/tabwriter"
 	"time"
 
 	"github.com/go-kit/log"
@@ -43,14 +45,27 @@ import (
 	"gopkg.in/alecthomas/kingpin.v2"
 	yaml "gopkg.in/yaml.v2"
 
+	dto "github.com/prometheus/client_model/go"
+	"github.com/prometheus/common/expfmt"
+
 	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/file"
 	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
 	"github.com/prometheus/prometheus/discovery/kubernetes"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/rulefmt"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/rulefmt"
+	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/promql"
+	"github.com/prometheus/prometheus/scrape"
+)
+
+const (
+	successExitCode = 0
+	failureExitCode = 1
+	// Exit code 3 is used for "one or more lint issues detected".
+	lintErrExitCode = 3
 )
 
 func main() {
@@ -60,11 +75,17 @@ func main() {
 
 	checkCmd := app.Command("check", "Check the resources for validity.")
 
+	sdCheckCmd := checkCmd.Command("service-discovery", "Perform service discovery for the given job name and report the results, including relabeling.")
+	sdConfigFile := sdCheckCmd.Arg("config-file", "The prometheus config file.").Required().ExistingFile()
+	sdJobName := sdCheckCmd.Arg("job", "The job to run service discovery for.").Required().String()
+	sdTimeout := sdCheckCmd.Flag("timeout", "The time to wait for discovery results.").Default("30s").Duration()
+
 	checkConfigCmd := checkCmd.Command("config", "Check if the config files are valid or not.")
 	configFiles := checkConfigCmd.Arg(
 		"config-files",
 		"The config files to check.",
 	).Required().ExistingFiles()
+	checkConfigSyntaxOnly := checkConfigCmd.Flag("syntax-only", "Only check the config file syntax, ignoring file and content validation referenced in the config").Bool()
 
 	checkWebConfigCmd := checkCmd.Command("web-config", "Check if the web config files are valid or not.")
 	webConfigFiles := checkWebConfigCmd.Arg(
@@ -79,6 +100,8 @@ func main() {
 	).Required().ExistingFiles()
 
 	checkMetricsCmd := checkCmd.Command("metrics", checkMetricsUsage)
+	checkMetricsExtended := checkCmd.Flag("extended", "Print extended information related to the cardinality of the metrics.").Bool()
+	agentMode := checkConfigCmd.Flag("agent", "Check config file for Prometheus in Agent mode.").Bool()
 
 	queryCmd := app.Command("query", "Run query against a Prometheus server.")
 	queryCmdFmt := queryCmd.Flag("format", "Output format of the query.").Short('o').Default("promql").Enum("promql", "json")
@@ -180,17 +203,14 @@ func main() {
 		p = &promqlPrinter{}
 	}
 
-	var queryOpts promql.LazyLoaderOpts
 	for _, f := range *featureList {
 		opts := strings.Split(f, ",")
 		for _, o := range opts {
 			switch o {
-			case "promql-at-modifier":
-				queryOpts.EnableAtModifier = true
-			case "promql-negative-offset":
-				queryOpts.EnableNegativeOffset = true
 			case "":
 				continue
+			case "promql-at-modifier", "promql-negative-offset":
+				fmt.Printf("  WARNING: Option for --enable-feature is a no-op after promotion to a stable feature: %q\n", o)
 			default:
 				fmt.Printf("  WARNING: Unknown option for --enable-feature: %q\n", o)
 			}
@@ -198,8 +218,11 @@ func main() {
 	}
 
 	switch parsedCmd {
+	case sdCheckCmd.FullCommand():
+		os.Exit(CheckSD(*sdConfigFile, *sdJobName, *sdTimeout))
+
 	case checkConfigCmd.FullCommand():
-		os.Exit(CheckConfig(*configFiles...))
+		os.Exit(CheckConfig(*agentMode, *checkConfigSyntaxOnly, *configFiles...))
 
 	case checkWebConfigCmd.FullCommand():
 		os.Exit(CheckWebConfig(*webConfigFiles...))
@@ -208,7 +231,7 @@ func main() {
 		os.Exit(CheckRules(*ruleFiles...))
 
 	case checkMetricsCmd.FullCommand():
-		os.Exit(CheckMetrics())
+		os.Exit(CheckMetrics(*checkMetricsExtended))
 
 	case queryInstantCmd.FullCommand():
 		os.Exit(QueryInstant(*queryInstantServer, *queryInstantExpr, *queryInstantTime, p))
@@ -232,7 +255,13 @@ func main() {
 		os.Exit(QueryLabels(*queryLabelsServer, *queryLabelsName, *queryLabelsBegin, *queryLabelsEnd, p))
 
 	case testRulesCmd.FullCommand():
-		os.Exit(RulesUnitTest(queryOpts, *testRulesFiles...))
+		os.Exit(RulesUnitTest(
+			promql.LazyLoaderOpts{
+				EnableAtModifier:     true,
+				EnableNegativeOffset: true,
+			},
+			*testRulesFiles...),
+		)
 
 	case tsdbBenchWriteCmd.FullCommand():
 		os.Exit(checkErr(benchmarkWrite(*benchWriteOutPath, *benchSamplesFile, *benchWriteNumMetrics, *benchWriteNumScrapes)))
@@ -245,7 +274,7 @@ func main() {
 
 	case tsdbDumpCmd.FullCommand():
 		os.Exit(checkErr(dumpSamples(*dumpPath, *dumpMinTime, *dumpMaxTime)))
-	//TODO(aSquare14): Work on adding support for custom block size.
+	// TODO(aSquare14): Work on adding support for custom block size.
 	case openMetricsImportCmd.FullCommand():
 		os.Exit(backfillOpenMetrics(*importFilePath, *importDBPath, *importHumanReadable, *importQuiet, *maxBlockDuration))
 
@@ -255,16 +284,19 @@ func main() {
 }
 
 // CheckConfig validates configuration files.
-func CheckConfig(files ...string) int {
+func CheckConfig(agentMode, checkSyntaxOnly bool, files ...string) int {
 	failed := false
 
 	for _, f := range files {
-		ruleFiles, err := checkConfig(f)
+		ruleFiles, err := checkConfig(agentMode, f, checkSyntaxOnly)
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "  FAILED:", err)
 			failed = true
 		} else {
-			fmt.Printf("  SUCCESS: %d rule files found\n", len(ruleFiles))
+			if len(ruleFiles) > 0 {
+				fmt.Printf("  SUCCESS: %d rule files found\n", len(ruleFiles))
+			}
+			fmt.Printf(" SUCCESS: %s is valid prometheus config file syntax\n", f)
 		}
 		fmt.Println()
 
@@ -282,9 +314,9 @@ func CheckConfig(files ...string) int {
 		}
 	}
 	if failed {
-		return 1
+		return failureExitCode
 	}
-	return 0
+	return successExitCode
 }
 
 // CheckWebConfig validates web configuration files.
@@ -300,9 +332,9 @@ func CheckWebConfig(files ...string) int
 		fmt.Fprintln(os.Stderr, f, "SUCCESS")
 	}
 	if failed {
-		return 1
+		return failureExitCode
 	}
-	return 0
+	return successExitCode
 }
 
 func checkFileExists(fn string) error {
@@ -314,48 +346,55 @@ func checkFileExists(fn string) error {
 	return err
 }
 
-func checkConfig(filename string) ([]string, error) {
+func checkConfig(agentMode bool, filename string, checkSyntaxOnly bool) ([]string, error) {
 	fmt.Println("Checking", filename)
 
-	cfg, err := config.LoadFile(filename, false, log.NewNopLogger())
+	cfg, err := config.LoadFile(filename, agentMode, false, log.NewNopLogger())
 	if err != nil {
 		return nil, err
 	}
 
 	var ruleFiles []string
-	for _, rf := range cfg.RuleFiles {
-		rfs, err := filepath.Glob(rf)
-		if err != nil {
-			return nil, err
-		}
-		// If an explicit file was given, error if it is not accessible.
-		if !strings.Contains(rf, "*") {
-			if len(rfs) == 0 {
-				return nil, errors.Errorf("%q does not point to an existing file", rf)
+	if !checkSyntaxOnly {
+		for _, rf := range cfg.RuleFiles {
+			rfs, err := filepath.Glob(rf)
+			if err != nil {
+				return nil, err
 			}
-			if err := checkFileExists(rfs[0]); err != nil {
-				return nil, errors.Wrapf(err, "error checking rule file %q", rfs[0])
+			// If an explicit file was given, error if it is not accessible.
+			if !strings.Contains(rf, "*") {
+				if len(rfs) == 0 {
+					return nil, errors.Errorf("%q does not point to an existing file", rf)
+				}
+				if err := checkFileExists(rfs[0]); err != nil {
+					return nil, errors.Wrapf(err, "error checking rule file %q", rfs[0])
+				}
 			}
+			ruleFiles = append(ruleFiles, rfs...)
 		}
-		ruleFiles = append(ruleFiles, rfs...)
 	}
 
 	for _, scfg := range cfg.ScrapeConfigs {
-		if err := checkFileExists(scfg.HTTPClientConfig.BearerTokenFile); err != nil {
-			return nil, errors.Wrapf(err, "error checking bearer token file %q", scfg.HTTPClientConfig.BearerTokenFile)
+		if !checkSyntaxOnly && scfg.HTTPClientConfig.Authorization != nil {
+			if err := checkFileExists(scfg.HTTPClientConfig.Authorization.CredentialsFile); err != nil {
+				return nil, errors.Wrapf(err, "error checking authorization credentials or bearer token file %q", scfg.HTTPClientConfig.Authorization.CredentialsFile)
+			}
 		}
 
-		if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig); err != nil {
+		if err := checkTLSConfig(scfg.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil {
 			return nil, err
 		}
 
 		for _, c := range scfg.ServiceDiscoveryConfigs {
 			switch c := c.(type) {
 			case *kubernetes.SDConfig:
-				if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig); err != nil {
+				if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil {
 					return nil, err
 				}
 			case *file.SDConfig:
+				if checkSyntaxOnly {
+					break
+				}
 				for _, file := range c.Files {
 					files, err := filepath.Glob(file)
 					if err != nil {
@@ -363,30 +402,67 @@ func checkConfig(filename string) ([]str
 					}
 					if len(files) != 0 {
 						for _, f := range files {
-							err = checkSDFile(f)
+							var targetGroups []*targetgroup.Group
+							targetGroups, err = checkSDFile(f)
 							if err != nil {
 								return nil, errors.Errorf("checking SD file %q: %v", file, err)
 							}
+							if err := checkTargetGroupsForScrapeConfig(targetGroups, scfg); err != nil {
+								return nil, err
+							}
 						}
 						continue
 					}
 					fmt.Printf("  WARNING: file %q for file_sd in scrape job %q does not exist\n", file, scfg.JobName)
 				}
+			case discovery.StaticConfig:
+				if err := checkTargetGroupsForScrapeConfig(c, scfg); err != nil {
+					return nil, err
+				}
 			}
 		}
 	}
 
-	return ruleFiles, nil
-}
+	alertConfig := cfg.AlertingConfig
+	for _, amcfg := range alertConfig.AlertmanagerConfigs {
+		for _, c := range amcfg.ServiceDiscoveryConfigs {
+			switch c := c.(type) {
+			case *file.SDConfig:
+				if checkSyntaxOnly {
+					break
+				}
+				for _, file := range c.Files {
+					files, err := filepath.Glob(file)
+					if err != nil {
+						return nil, err
+					}
+					if len(files) != 0 {
+						for _, f := range files {
+							var targetGroups []*targetgroup.Group
+							targetGroups, err = checkSDFile(f)
+							if err != nil {
+								return nil, errors.Errorf("checking SD file %q: %v", file, err)
+							}
 
-func checkTLSConfig(tlsConfig config_util.TLSConfig) error {
-	if err := checkFileExists(tlsConfig.CertFile); err != nil {
-		return errors.Wrapf(err, "error checking client cert file %q", tlsConfig.CertFile)
-	}
-	if err := checkFileExists(tlsConfig.KeyFile); err != nil {
-		return errors.Wrapf(err, "error checking client key file %q", tlsConfig.KeyFile)
+							if err := checkTargetGroupsForAlertmanager(targetGroups, amcfg); err != nil {
+								return nil, err
+							}
+						}
+						continue
+					}
+					fmt.Printf("  WARNING: file %q for file_sd in alertmanager config does not exist\n", file)
+				}
+			case discovery.StaticConfig:
+				if err := checkTargetGroupsForAlertmanager(c, amcfg); err != nil {
+					return nil, err
+				}
+			}
+		}
 	}
+	return ruleFiles, nil
+}
 
+func checkTLSConfig(tlsConfig config_util.TLSConfig, checkSyntaxOnly bool) error {
 	if len(tlsConfig.CertFile) > 0 && len(tlsConfig.KeyFile) == 0 {
 		return errors.Errorf("client cert file %q specified without client key file", tlsConfig.CertFile)
 	}
@@ -394,19 +470,30 @@ func checkTLSConfig(tlsConfig config_uti
 		return errors.Errorf("client key file %q specified without client cert file", tlsConfig.KeyFile)
 	}
 
+	if checkSyntaxOnly {
+		return nil
+	}
+
+	if err := checkFileExists(tlsConfig.CertFile); err != nil {
+		return errors.Wrapf(err, "error checking client cert file %q", tlsConfig.CertFile)
+	}
+	if err := checkFileExists(tlsConfig.KeyFile); err != nil {
+		return errors.Wrapf(err, "error checking client key file %q", tlsConfig.KeyFile)
+	}
+
 	return nil
 }
 
-func checkSDFile(filename string) error {
+func checkSDFile(filename string) ([]*targetgroup.Group, error) {
 	fd, err := os.Open(filename)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer fd.Close()
 
 	content, err := ioutil.ReadAll(fd)
 	if err != nil {
-		return err
+		return nil, err
 	}
 
 	var targetGroups []*targetgroup.Group
@@ -414,23 +501,23 @@ func checkSDFile(filename string) error
 	switch ext := filepath.Ext(filename); strings.ToLower(ext) {
 	case ".json":
 		if err := json.Unmarshal(content, &targetGroups); err != nil {
-			return err
+			return nil, err
 		}
 	case ".yml", ".yaml":
 		if err := yaml.UnmarshalStrict(content, &targetGroups); err != nil {
-			return err
+			return nil, err
 		}
 	default:
-		return errors.Errorf("invalid file extension: %q", ext)
+		return nil, errors.Errorf("invalid file extension: %q", ext)
 	}
 
 	for i, tg := range targetGroups {
 		if tg == nil {
-			return errors.Errorf("nil target group item found (index %d)", i)
+			return nil, errors.Errorf("nil target group item found (index %d)", i)
 		}
 	}
 
-	return nil
+	return targetGroups, nil
 }
 
 // CheckRules validates rule files.
@@ -450,9 +537,9 @@ func CheckRules(files ...string) int {
 		fmt.Println()
 	}
 	if failed {
-		return 1
+		return failureExitCode
 	}
-	return 0
+	return successExitCode
 }
 
 func checkRules(filename string) (int, []error) {
@@ -460,7 +547,7 @@ func checkRules(filename string) (int, [
 
 	rgs, errs := rulefmt.ParseFile(filename)
 	if errs != nil {
-		return 0, errs
+		return successExitCode, errs
 	}
 
 	numRules := 0
@@ -507,7 +594,6 @@ func checkDuplicates(groups []rulefmt.Ru
 	var rules compareRuleTypes
 
 	for _, group := range groups {
-
 		for _, rule := range group.Rules {
 			rules = append(rules, compareRuleType{
 				metric: ruleMetric(rule),
@@ -552,12 +638,14 @@ $ curl -s http://localhost:9090/metrics
 `)
 
 // CheckMetrics performs a linting pass on input metrics.
-func CheckMetrics() int {
-	l := promlint.New(os.Stdin)
+func CheckMetrics(extended bool) int {
+	var buf bytes.Buffer
+	tee := io.TeeReader(os.Stdin, &buf)
+	l := promlint.New(tee)
 	problems, err := l.Lint()
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "error while linting:", err)
-		return 1
+		return failureExitCode
 	}
 
 	for _, p := range problems {
@@ -565,10 +653,71 @@ func CheckMetrics() int {
 	}
 
 	if len(problems) > 0 {
-		return 3
+		return lintErrExitCode
+	}
+
+	if extended {
+		stats, total, err := checkMetricsExtended(&buf)
+		if err != nil {
+			fmt.Fprintln(os.Stderr, err)
+			return failureExitCode
+		}
+		w := tabwriter.NewWriter(os.Stdout, 4, 4, 4, ' ', tabwriter.TabIndent)
+		fmt.Fprintf(w, "Metric\tCardinality\tPercentage\t\n")
+		for _, stat := range stats {
+			fmt.Fprintf(w, "%s\t%d\t%.2f%%\t\n", stat.name, stat.cardinality, stat.percentage*100)
+		}
+		fmt.Fprintf(w, "Total\t%d\t%.f%%\t\n", total, 100.)
+		w.Flush()
+	}
+
+	return successExitCode
+}
+
+type metricStat struct {
+	name        string
+	cardinality int
+	percentage  float64
+}
+
+func checkMetricsExtended(r io.Reader) ([]metricStat, int, error) {
+	p := expfmt.TextParser{}
+	metricFamilies, err := p.TextToMetricFamilies(r)
+	if err != nil {
+		return nil, 0, fmt.Errorf("error while parsing text to metric families: %w", err)
+	}
+
+	var total int
+	stats := make([]metricStat, 0, len(metricFamilies))
+	for _, mf := range metricFamilies {
+		var cardinality int
+		switch mf.GetType() {
+		case dto.MetricType_COUNTER, dto.MetricType_GAUGE, dto.MetricType_UNTYPED:
+			cardinality = len(mf.Metric)
+		case dto.MetricType_HISTOGRAM:
+			// Histogram metrics includes sum, count, buckets.
+			buckets := len(mf.Metric[0].Histogram.Bucket)
+			cardinality = len(mf.Metric) * (2 + buckets)
+		case dto.MetricType_SUMMARY:
+			// Summary metrics includes sum, count, quantiles.
+			quantiles := len(mf.Metric[0].Summary.Quantile)
+			cardinality = len(mf.Metric) * (2 + quantiles)
+		default:
+			cardinality = len(mf.Metric)
+		}
+		stats = append(stats, metricStat{name: mf.GetName(), cardinality: cardinality})
+		total += cardinality
 	}
 
-	return 0
+	for i := range stats {
+		stats[i].percentage = float64(stats[i].cardinality) / float64(total)
+	}
+
+	sort.SliceStable(stats, func(i, j int) bool {
+		return stats[i].cardinality > stats[j].cardinality
+	})
+
+	return stats, total, nil
 }
 
 // QueryInstant performs an instant query against a Prometheus server.
@@ -584,7 +733,7 @@ func QueryInstant(url *url.URL, query, e
 	c, err := api.NewClient(config)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "error creating API client:", err)
-		return 1
+		return failureExitCode
 	}
 
 	eTime := time.Now()
@@ -592,7 +741,7 @@ func QueryInstant(url *url.URL, query, e
 		eTime, err = parseTime(evalTime)
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "error parsing evaluation time:", err)
-			return 1
+			return failureExitCode
 		}
 	}
 
@@ -608,7 +757,7 @@ func QueryInstant(url *url.URL, query, e
 
 	p.printValue(val)
 
-	return 0
+	return successExitCode
 }
 
 // QueryRange performs a range query against a Prometheus server.
@@ -633,7 +782,7 @@ func QueryRange(url *url.URL, headers ma
 	c, err := api.NewClient(config)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "error creating API client:", err)
-		return 1
+		return failureExitCode
 	}
 
 	var stime, etime time.Time
@@ -644,7 +793,7 @@ func QueryRange(url *url.URL, headers ma
 		etime, err = parseTime(end)
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "error parsing end time:", err)
-			return 1
+			return failureExitCode
 		}
 	}
 
@@ -654,13 +803,13 @@ func QueryRange(url *url.URL, headers ma
 		stime, err = parseTime(start)
 		if err != nil {
 			fmt.Fprintln(os.Stderr, "error parsing start time:", err)
-			return 1
+			return failureExitCode
 		}
 	}
 
 	if !stime.Before(etime) {
 		fmt.Fprintln(os.Stderr, "start time is not before end time")
-		return 1
+		return failureExitCode
 	}
 
 	if step == 0 {
@@ -681,7 +830,7 @@ func QueryRange(url *url.URL, headers ma
 	}
 
 	p.printValue(val)
-	return 0
+	return successExitCode
 }
 
 // QuerySeries queries for a series against a Prometheus server.
@@ -697,13 +846,13 @@ func QuerySeries(url *url.URL, matchers
 	c, err := api.NewClient(config)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "error creating API client:", err)
-		return 1
+		return failureExitCode
 	}
 
 	stime, etime, err := parseStartTimeAndEndTime(start, end)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, err)
-		return 1
+		return failureExitCode
 	}
 
 	// Run query against client.
@@ -717,11 +866,11 @@ func QuerySeries(url *url.URL, matchers
 	}
 
 	p.printSeries(val)
-	return 0
+	return successExitCode
 }
 
 // QueryLabels queries for label values against a Prometheus server.
-func QueryLabels(url *url.URL, name string, start, end string, p printer) int {
+func QueryLabels(url *url.URL, name, start, end string, p printer) int {
 	if url.Scheme == "" {
 		url.Scheme = "http"
 	}
@@ -733,13 +882,13 @@ func QueryLabels(url *url.URL, name stri
 	c, err := api.NewClient(config)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "error creating API client:", err)
-		return 1
+		return failureExitCode
 	}
 
 	stime, etime, err := parseStartTimeAndEndTime(start, end)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, err)
-		return 1
+		return failureExitCode
 	}
 
 	// Run query against client.
@@ -756,7 +905,7 @@ func QueryLabels(url *url.URL, name stri
 	}
 
 	p.printLabelValues(val)
-	return 0
+	return successExitCode
 }
 
 func handleAPIError(err error) int {
@@ -767,7 +916,7 @@ func handleAPIError(err error) int {
 		fmt.Fprintln(os.Stderr, "query error:", err)
 	}
 
-	return 1
+	return failureExitCode
 }
 
 func parseStartTimeAndEndTime(start, end string) (time.Time, time.Time, error) {
@@ -859,9 +1008,9 @@ func debugPprof(url string) int {
 		endPointGroups: pprofEndpoints,
 	}); err != nil {
 		fmt.Fprintln(os.Stderr, "error completing debug command:", err)
-		return 1
+		return failureExitCode
 	}
-	return 0
+	return successExitCode
 }
 
 func debugMetrics(url string) int {
@@ -871,9 +1020,9 @@ func debugMetrics(url string) int {
 		endPointGroups: metricsEndpoints,
 	}); err != nil {
 		fmt.Fprintln(os.Stderr, "error completing debug command:", err)
-		return 1
+		return failureExitCode
 	}
-	return 0
+	return successExitCode
 }
 
 func debugAll(url string) int {
@@ -883,9 +1032,9 @@ func debugAll(url string) int {
 		endPointGroups: allEndpoints,
 	}); err != nil {
 		fmt.Fprintln(os.Stderr, "error completing debug command:", err)
-		return 1
+		return failureExitCode
 	}
-	return 0
+	return successExitCode
 }
 
 type printer interface {
@@ -899,11 +1048,13 @@ type promqlPrinter struct{}
 func (p *promqlPrinter) printValue(v model.Value) {
 	fmt.Println(v)
 }
+
 func (p *promqlPrinter) printSeries(val []model.LabelSet) {
 	for _, v := range val {
 		fmt.Println(v)
 	}
 }
+
 func (p *promqlPrinter) printLabelValues(val model.LabelValues) {
 	for _, v := range val {
 		fmt.Println(v)
@@ -916,10 +1067,12 @@ func (j *jsonPrinter) printValue(v model
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
 }
+
 func (j *jsonPrinter) printSeries(v []model.LabelSet) {
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
 }
+
 func (j *jsonPrinter) printLabelValues(v model.LabelValues) {
 	//nolint:errcheck
 	json.NewEncoder(os.Stdout).Encode(v)
@@ -927,7 +1080,7 @@ func (j *jsonPrinter) printLabelValues(v
 
 // importRules backfills recording rules from the files provided. The output are blocks of data
 // at the outputDir location.
-func importRules(url *url.URL, start, end, outputDir string, evalInterval time.Duration, maxBlockDuration time.Duration, files ...string) error {
+func importRules(url *url.URL, start, end, outputDir string, evalInterval, maxBlockDuration time.Duration, files ...string) error {
 	ctx := context.Background()
 	var stime, etime time.Time
 	var err error
@@ -980,4 +1133,26 @@ func importRules(url *url.URL, start, en
 	}
 
 	return nil
+}
+
+func checkTargetGroupsForAlertmanager(targetGroups []*targetgroup.Group, amcfg *config.AlertmanagerConfig) error {
+	for _, tg := range targetGroups {
+		if _, _, err := notifier.AlertmanagerFromGroup(tg, amcfg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func checkTargetGroupsForScrapeConfig(targetGroups []*targetgroup.Group, scfg *config.ScrapeConfig) error {
+	for _, tg := range targetGroups {
+		_, failures := scrape.TargetsFromGroup(tg, scfg)
+		if len(failures) > 0 {
+			first := failures[0]
+			return first
+		}
+	}
+
+	return nil
 }
diff -pruN 2.31.2+ds1-1/cmd/promtool/main_test.go 2.33.5+ds1-2/cmd/promtool/main_test.go
--- 2.31.2+ds1-1/cmd/promtool/main_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/main_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -18,12 +18,16 @@ import (
 	"net/http"
 	"net/http/httptest"
 	"net/url"
+	"os"
+	"runtime"
+	"strings"
 	"testing"
 	"time"
 
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/rulefmt"
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/rulefmt"
 )
 
 func TestQueryRange(t *testing.T) {
@@ -111,7 +115,7 @@ func TestCheckSDFile(t *testing.T) {
 	}
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
-			err := checkSDFile(test.file)
+			_, err := checkSDFile(test.file)
 			if test.err != "" {
 				require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
 				return
@@ -163,3 +167,195 @@ func BenchmarkCheckDuplicates(b *testing
 		checkDuplicates(rgs.Groups)
 	}
 }
+
+func TestCheckTargetConfig(t *testing.T) {
+	cases := []struct {
+		name string
+		file string
+		err  string
+	}{
+		{
+			name: "url_in_scrape_targetgroup_with_relabel_config.good",
+			file: "url_in_scrape_targetgroup_with_relabel_config.good.yml",
+			err:  "",
+		},
+		{
+			name: "url_in_alert_targetgroup_with_relabel_config.good",
+			file: "url_in_alert_targetgroup_with_relabel_config.good.yml",
+			err:  "",
+		},
+		{
+			name: "url_in_scrape_targetgroup_with_relabel_config.bad",
+			file: "url_in_scrape_targetgroup_with_relabel_config.bad.yml",
+			err:  "instance 0 in group 0: \"http://bad\" is not a valid hostname",
+		},
+		{
+			name: "url_in_alert_targetgroup_with_relabel_config.bad",
+			file: "url_in_alert_targetgroup_with_relabel_config.bad.yml",
+			err:  "\"http://bad\" is not a valid hostname",
+		},
+	}
+	for _, test := range cases {
+		t.Run(test.name, func(t *testing.T) {
+			_, err := checkConfig(false, "testdata/"+test.file, false)
+			if test.err != "" {
+				require.Equalf(t, test.err, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+				return
+			}
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestCheckConfigSyntax(t *testing.T) {
+	cases := []struct {
+		name       string
+		file       string
+		syntaxOnly bool
+		err        string
+		errWindows string
+	}{
+		{
+			name:       "check with syntax only succeeds with nonexistent rule files",
+			file:       "config_with_rule_files.yml",
+			syntaxOnly: true,
+			err:        "",
+			errWindows: "",
+		},
+		{
+			name:       "check without syntax only fails with nonexistent rule files",
+			file:       "config_with_rule_files.yml",
+			syntaxOnly: false,
+			err:        "\"testdata/non-existent-file.yml\" does not point to an existing file",
+			errWindows: "\"testdata\\\\non-existent-file.yml\" does not point to an existing file",
+		},
+		{
+			name:       "check with syntax only succeeds with nonexistent service discovery files",
+			file:       "config_with_service_discovery_files.yml",
+			syntaxOnly: true,
+			err:        "",
+			errWindows: "",
+		},
+		// The test below doesn't fail because the file verification for ServiceDiscoveryConfigs doesn't fail the check if
+		// file isn't found; it only outputs a warning message.
+		{
+			name:       "check without syntax only succeeds with nonexistent service discovery files",
+			file:       "config_with_service_discovery_files.yml",
+			syntaxOnly: false,
+			err:        "",
+			errWindows: "",
+		},
+		{
+			name:       "check with syntax only succeeds with nonexistent TLS files",
+			file:       "config_with_tls_files.yml",
+			syntaxOnly: true,
+			err:        "",
+			errWindows: "",
+		},
+		{
+			name:       "check without syntax only fails with nonexistent TLS files",
+			file:       "config_with_tls_files.yml",
+			syntaxOnly: false,
+			err: "error checking client cert file \"testdata/nonexistent_cert_file.yml\": " +
+				"stat testdata/nonexistent_cert_file.yml: no such file or directory",
+			errWindows: "error checking client cert file \"testdata\\\\nonexistent_cert_file.yml\": " +
+				"CreateFile testdata\\nonexistent_cert_file.yml: The system cannot find the file specified.",
+		},
+		{
+			name:       "check with syntax only succeeds with nonexistent credentials file",
+			file:       "authorization_credentials_file.bad.yml",
+			syntaxOnly: true,
+			err:        "",
+			errWindows: "",
+		},
+		{
+			name:       "check without syntax only fails with nonexistent credentials file",
+			file:       "authorization_credentials_file.bad.yml",
+			syntaxOnly: false,
+			err: "error checking authorization credentials or bearer token file \"/random/file/which/does/not/exist.yml\": " +
+				"stat /random/file/which/does/not/exist.yml: no such file or directory",
+			errWindows: "error checking authorization credentials or bearer token file \"testdata\\\\random\\\\file\\\\which\\\\does\\\\not\\\\exist.yml\": " +
+				"CreateFile testdata\\random\\file\\which\\does\\not\\exist.yml: The system cannot find the path specified.",
+		},
+	}
+	for _, test := range cases {
+		t.Run(test.name, func(t *testing.T) {
+			_, err := checkConfig(false, "testdata/"+test.file, test.syntaxOnly)
+			expectedErrMsg := test.err
+			if strings.Contains(runtime.GOOS, "windows") {
+				expectedErrMsg = test.errWindows
+			}
+			if expectedErrMsg != "" {
+				require.Equalf(t, expectedErrMsg, err.Error(), "Expected error %q, got %q", test.err, err.Error())
+				return
+			}
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestAuthorizationConfig(t *testing.T) {
+	cases := []struct {
+		name string
+		file string
+		err  string
+	}{
+		{
+			name: "authorization_credentials_file.bad",
+			file: "authorization_credentials_file.bad.yml",
+			err:  "error checking authorization credentials or bearer token file",
+		},
+		{
+			name: "authorization_credentials_file.good",
+			file: "authorization_credentials_file.good.yml",
+			err:  "",
+		},
+	}
+
+	for _, test := range cases {
+		t.Run(test.name, func(t *testing.T) {
+			_, err := checkConfig(false, "testdata/"+test.file, false)
+			if test.err != "" {
+				require.Contains(t, err.Error(), test.err, "Expected error to contain %q, got %q", test.err, err.Error())
+				return
+			}
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestCheckMetricsExtended(t *testing.T) {
+	if runtime.GOOS == "windows" {
+		t.Skip("Skipping on windows")
+	}
+
+	f, err := os.Open("testdata/metrics-test.prom")
+	require.NoError(t, err)
+	defer f.Close()
+
+	stats, total, err := checkMetricsExtended(f)
+	require.NoError(t, err)
+	require.Equal(t, 27, total)
+	require.Equal(t, []metricStat{
+		{
+			name:        "prometheus_tsdb_compaction_chunk_size_bytes",
+			cardinality: 15,
+			percentage:  float64(15) / float64(27),
+		},
+		{
+			name:        "go_gc_duration_seconds",
+			cardinality: 7,
+			percentage:  float64(7) / float64(27),
+		},
+		{
+			name:        "net_conntrack_dialer_conn_attempted_total",
+			cardinality: 4,
+			percentage:  float64(4) / float64(27),
+		},
+		{
+			name:        "go_info",
+			cardinality: 1,
+			percentage:  float64(1) / float64(27),
+		},
+	}, stats)
+}
diff -pruN 2.31.2+ds1-1/cmd/promtool/rules.go 2.33.5+ds1-2/cmd/promtool/rules.go
--- 2.31.2+ds1-1/cmd/promtool/rules.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/rules.go	2022-03-08 16:34:32.000000000 +0000
@@ -23,8 +23,9 @@ import (
 	"github.com/pkg/errors"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/timestamp"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/tsdb"
@@ -170,7 +171,7 @@ func (importer *ruleImporter) importRule
 				}
 			}
 		default:
-			return errors.New(fmt.Sprintf("rule result is wrong type %s", val.Type().String()))
+			return fmt.Errorf("rule result is wrong type %s", val.Type().String())
 		}
 
 		if err := app.flushAndCommit(ctx); err != nil {
diff -pruN 2.31.2+ds1-1/cmd/promtool/rules_test.go 2.33.5+ds1-2/cmd/promtool/rules_test.go
--- 2.31.2+ds1-1/cmd/promtool/rules_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/rules_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -17,7 +17,6 @@ import (
 	"context"
 	"io/ioutil"
 	"math"
-	"os"
 	"path/filepath"
 	"testing"
 	"time"
@@ -25,9 +24,10 @@ import (
 	"github.com/go-kit/log"
 	v1 "github.com/prometheus/client_golang/api/prometheus/v1"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/tsdb"
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/tsdb"
 )
 
 type mockQueryRangeAPI struct {
@@ -54,7 +54,7 @@ func TestBackfillRuleIntegration(t *test
 		twentyFourHourDuration, _ = time.ParseDuration("24h")
 	)
 
-	var testCases = []struct {
+	testCases := []struct {
 		name                string
 		runcount            int
 		maxBlockDuration    time.Duration
@@ -71,11 +71,7 @@ func TestBackfillRuleIntegration(t *test
 	}
 	for _, tt := range testCases {
 		t.Run(tt.name, func(t *testing.T) {
-			tmpDir, err := ioutil.TempDir("", "backfilldata")
-			require.NoError(t, err)
-			defer func() {
-				require.NoError(t, os.RemoveAll(tmpDir))
-			}()
+			tmpDir := t.TempDir()
 			ctx := context.Background()
 
 			// Execute the test more than once to simulate running the rule importer twice with the same data.
@@ -192,7 +188,7 @@ func createSingleRuleTestFiles(path stri
     labels:
         testlabel11: testlabelvalue11
 `
-	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
+	return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
 }
 
 func createMultiRuleTestFiles(path string) error {
@@ -212,17 +208,13 @@ func createMultiRuleTestFiles(path strin
     labels:
         testlabel11: testlabelvalue13
 `
-	return ioutil.WriteFile(path, []byte(recordingRules), 0777)
+	return ioutil.WriteFile(path, []byte(recordingRules), 0o777)
 }
 
 // TestBackfillLabels confirms that the labels in the rule file override the labels from the metrics
 // received from Prometheus Query API, including the __name__ label.
 func TestBackfillLabels(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "backfilldata")
-	require.NoError(t, err)
-	defer func() {
-		require.NoError(t, os.RemoveAll(tmpDir))
-	}()
+	tmpDir := t.TempDir()
 	ctx := context.Background()
 
 	start := time.Date(2009, time.November, 10, 6, 34, 0, 0, time.UTC)
@@ -244,7 +236,7 @@ func TestBackfillLabels(t *testing.T) {
     labels:
         name1: value-from-rule
 `
-	require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0777))
+	require.NoError(t, ioutil.WriteFile(path, []byte(recordingRules), 0o777))
 	errs := ruleImporter.loadGroups(ctx, []string{path})
 	for _, err := range errs {
 		require.NoError(t, err)
diff -pruN 2.31.2+ds1-1/cmd/promtool/sd.go 2.33.5+ds1-2/cmd/promtool/sd.go
--- 2.31.2+ds1-1/cmd/promtool/sd.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/sd.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,148 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"os"
+	"reflect"
+	"time"
+
+	"github.com/go-kit/log"
+
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/discovery"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/scrape"
+)
+
+type sdCheckResult struct {
+	DiscoveredLabels labels.Labels `json:"discoveredLabels"`
+	Labels           labels.Labels `json:"labels"`
+	Error            error         `json:"error,omitempty"`
+}
+
+// CheckSD performs service discovery for the given job name and reports the results.
+func CheckSD(sdConfigFiles, sdJobName string, sdTimeout time.Duration) int {
+	logger := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
+
+	cfg, err := config.LoadFile(sdConfigFiles, false, false, logger)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, "Cannot load config", err)
+		return failureExitCode
+	}
+
+	var scrapeConfig *config.ScrapeConfig
+	jobs := []string{}
+	jobMatched := false
+	for _, v := range cfg.ScrapeConfigs {
+		jobs = append(jobs, v.JobName)
+		if v.JobName == sdJobName {
+			jobMatched = true
+			scrapeConfig = v
+			break
+		}
+	}
+
+	if !jobMatched {
+		fmt.Fprintf(os.Stderr, "Job %s not found. Select one of:\n", sdJobName)
+		for _, job := range jobs {
+			fmt.Fprintf(os.Stderr, "\t%s\n", job)
+		}
+		return failureExitCode
+	}
+
+	targetGroupChan := make(chan []*targetgroup.Group)
+	ctx, cancel := context.WithTimeout(context.Background(), sdTimeout)
+	defer cancel()
+
+	for _, cfg := range scrapeConfig.ServiceDiscoveryConfigs {
+		d, err := cfg.NewDiscoverer(discovery.DiscovererOptions{Logger: logger})
+		if err != nil {
+			fmt.Fprintln(os.Stderr, "Could not create new discoverer", err)
+			return failureExitCode
+		}
+		go d.Run(ctx, targetGroupChan)
+	}
+
+	var targetGroups []*targetgroup.Group
+	sdCheckResults := make(map[string][]*targetgroup.Group)
+outerLoop:
+	for {
+		select {
+		case targetGroups = <-targetGroupChan:
+			for _, tg := range targetGroups {
+				sdCheckResults[tg.Source] = append(sdCheckResults[tg.Source], tg)
+			}
+		case <-ctx.Done():
+			break outerLoop
+		}
+	}
+	results := []sdCheckResult{}
+	for _, tgs := range sdCheckResults {
+		results = append(results, getSDCheckResult(tgs, scrapeConfig)...)
+	}
+
+	res, err := json.MarshalIndent(results, "", "  ")
+	if err != nil {
+		fmt.Fprintf(os.Stderr, "Could not marshal result json: %s", err)
+		return failureExitCode
+	}
+
+	fmt.Printf("%s", res)
+	return successExitCode
+}
+
+func getSDCheckResult(targetGroups []*targetgroup.Group, scrapeConfig *config.ScrapeConfig) []sdCheckResult {
+	sdCheckResults := []sdCheckResult{}
+	for _, targetGroup := range targetGroups {
+		for _, target := range targetGroup.Targets {
+			labelSlice := make([]labels.Label, 0, len(target)+len(targetGroup.Labels))
+
+			for name, value := range target {
+				labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)})
+			}
+
+			for name, value := range targetGroup.Labels {
+				if _, ok := target[name]; !ok {
+					labelSlice = append(labelSlice, labels.Label{Name: string(name), Value: string(value)})
+				}
+			}
+
+			targetLabels := labels.New(labelSlice...)
+			res, orig, err := scrape.PopulateLabels(targetLabels, scrapeConfig)
+			result := sdCheckResult{
+				DiscoveredLabels: orig,
+				Labels:           res,
+				Error:            err,
+			}
+
+			duplicateRes := false
+			for _, sdCheckRes := range sdCheckResults {
+				if reflect.DeepEqual(sdCheckRes, result) {
+					duplicateRes = true
+					break
+				}
+			}
+
+			if !duplicateRes {
+				sdCheckResults = append(sdCheckResults, result)
+			}
+		}
+	}
+	return sdCheckResults
+}
diff -pruN 2.31.2+ds1-1/cmd/promtool/sd_test.go 2.33.5+ds1-2/cmd/promtool/sd_test.go
--- 2.31.2+ds1-1/cmd/promtool/sd_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/sd_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,73 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package main
+
+import (
+	"testing"
+	"time"
+
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/config"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestSDCheckResult(t *testing.T) {
+	targetGroups := []*targetgroup.Group{{
+		Targets: []model.LabelSet{
+			map[model.LabelName]model.LabelValue{"__address__": "localhost:8080", "foo": "bar"},
+		},
+	}}
+
+	reg, err := relabel.NewRegexp("(.*)")
+	require.Nil(t, err)
+
+	scrapeConfig := &config.ScrapeConfig{
+		ScrapeInterval: model.Duration(1 * time.Minute),
+		ScrapeTimeout:  model.Duration(10 * time.Second),
+		RelabelConfigs: []*relabel.Config{{
+			SourceLabels: model.LabelNames{"foo"},
+			Action:       relabel.Replace,
+			TargetLabel:  "newfoo",
+			Regex:        reg,
+			Replacement:  "$1",
+		}},
+	}
+
+	expectedSDCheckResult := []sdCheckResult{
+		{
+			DiscoveredLabels: labels.Labels{
+				labels.Label{Name: "__address__", Value: "localhost:8080"},
+				labels.Label{Name: "__scrape_interval__", Value: "1m"},
+				labels.Label{Name: "__scrape_timeout__", Value: "10s"},
+				labels.Label{Name: "foo", Value: "bar"},
+			},
+			Labels: labels.Labels{
+				labels.Label{Name: "__address__", Value: "localhost:8080"},
+				labels.Label{Name: "__scrape_interval__", Value: "1m"},
+				labels.Label{Name: "__scrape_timeout__", Value: "10s"},
+				labels.Label{Name: "foo", Value: "bar"},
+				labels.Label{Name: "instance", Value: "localhost:8080"},
+				labels.Label{Name: "newfoo", Value: "bar"},
+			},
+			Error: nil,
+		},
+	}
+
+	require.Equal(t, expectedSDCheckResult, getSDCheckResult(targetGroups, scrapeConfig))
+}
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/authorization_credentials_file.bad.yml 2.33.5+ds1-2/cmd/promtool/testdata/authorization_credentials_file.bad.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/authorization_credentials_file.bad.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/authorization_credentials_file.bad.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,4 @@
+scrape_configs:
+  - job_name: test
+    authorization:
+      credentials_file: "/random/file/which/does/not/exist.yml"
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/authorization_credentials_file.good.yml 2.33.5+ds1-2/cmd/promtool/testdata/authorization_credentials_file.good.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/authorization_credentials_file.good.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/authorization_credentials_file.good.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,4 @@
+scrape_configs:
+  - job_name: test
+    authorization:
+      credentials_file: "."
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/config_with_rule_files.yml 2.33.5+ds1-2/cmd/promtool/testdata/config_with_rule_files.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/config_with_rule_files.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/config_with_rule_files.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,3 @@
+rule_files:
+  - non-existent-file.yml
+  - /etc/non/existent/file.yml
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/config_with_service_discovery_files.yml 2.33.5+ds1-2/cmd/promtool/testdata/config_with_service_discovery_files.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/config_with_service_discovery_files.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/config_with_service_discovery_files.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,12 @@
+scrape_configs:
+  - job_name: prometheus
+    file_sd_configs:
+      - files:
+          - nonexistent_file.yml
+alerting:
+  alertmanagers:
+    - scheme: http
+      api_version: v1
+      file_sd_configs:
+        - files:
+            - nonexistent_file.yml
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/config_with_tls_files.yml 2.33.5+ds1-2/cmd/promtool/testdata/config_with_tls_files.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/config_with_tls_files.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/config_with_tls_files.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,5 @@
+scrape_configs:
+  - job_name: "some job"
+    tls_config:
+      cert_file: nonexistent_cert_file.yml
+      key_file: nonexistent_key_file.yml
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/long-period.yml 2.33.5+ds1-2/cmd/promtool/testdata/long-period.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/long-period.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/long-period.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,34 @@
+# Evaluate once every 100d to avoid this taking too long.
+evaluation_interval: 100d
+
+rule_files:
+  - rules.yml
+
+tests:
+  - interval: 100d
+    input_series:
+      - series: test
+        # Max time in time.Duration is 106751d from 1970 (2^63/10^9), i.e. 2262.
+        # We use the nearest 100 days to that to ensure the unit tests can fully
+        # cover the expected range.
+        values: '0+1x1067'
+
+    promql_expr_test:
+      - expr: timestamp(test)
+        eval_time: 0m
+        exp_samples:
+          - value: 0
+      - expr: test
+        eval_time: 100d # one evaluation_interval.
+        exp_samples:
+          - labels: test
+            value: 1
+      - expr: timestamp(test)
+        eval_time: 106700d
+        exp_samples:
+          - value: 9218880000 # 106700d -> seconds.
+      - expr: fixed_data
+        eval_time: 106700d
+        exp_samples:
+          - labels: fixed_data
+            value: 1
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/metrics-test.prom 2.33.5+ds1-2/cmd/promtool/testdata/metrics-test.prom
--- 2.31.2+ds1-1/cmd/promtool/testdata/metrics-test.prom	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/metrics-test.prom	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,35 @@
+# HELP go_gc_duration_seconds A summary of the pause duration of garbage collection cycles.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 2.391e-05
+go_gc_duration_seconds{quantile="0.25"} 9.4402e-05
+go_gc_duration_seconds{quantile="0.5"} 0.000118953
+go_gc_duration_seconds{quantile="0.75"} 0.000145884
+go_gc_duration_seconds{quantile="1"} 0.005201208
+go_gc_duration_seconds_sum 0.036134048
+go_gc_duration_seconds_count 232
+# HELP prometheus_tsdb_compaction_chunk_size_bytes Final size of chunks on their first compaction
+# TYPE prometheus_tsdb_compaction_chunk_size_bytes histogram
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="32"} 662
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="48"} 1460
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="72"} 2266
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="108"} 3958
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="162"} 4861
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="243"} 5721
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="364.5"} 10493
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="546.75"} 12464
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="820.125"} 13254
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1230.1875"} 13699
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="1845.28125"} 13806
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="2767.921875"} 13852
+prometheus_tsdb_compaction_chunk_size_bytes_bucket{le="+Inf"} 13867
+prometheus_tsdb_compaction_chunk_size_bytes_sum 3.886707e+06
+prometheus_tsdb_compaction_chunk_size_bytes_count 13867
+# HELP net_conntrack_dialer_conn_attempted_total Total number of connections attempted by the given dialer a given name.
+# TYPE net_conntrack_dialer_conn_attempted_total counter
+net_conntrack_dialer_conn_attempted_total{dialer_name="blackbox"} 5210
+net_conntrack_dialer_conn_attempted_total{dialer_name="default"} 0
+net_conntrack_dialer_conn_attempted_total{dialer_name="node"} 21
+net_conntrack_dialer_conn_attempted_total{dialer_name="prometheus"} 21
+# HELP go_info Information about the Go environment.
+# TYPE go_info gauge
+go_info{version="go1.17"} 1
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml 2.33.5+ds1-2/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.bad.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,8 @@
+alerting:
+  alertmanagers:
+    - relabel_configs:
+        - source_labels: [__address__]
+          target_label: __param_target
+      static_configs:
+        - targets:
+            - http://bad
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml 2.33.5+ds1-2/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/url_in_alert_targetgroup_with_relabel_config.good.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,10 @@
+alerting:
+  alertmanagers:
+    - relabel_configs:
+        - source_labels: [__address__]
+          target_label: __param_target
+        - target_label: __address__
+          replacement: good
+      static_configs:
+        - targets:
+            - http://bad
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml 2.33.5+ds1-2/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.bad.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,8 @@
+scrape_configs:
+  - job_name: prometheus
+    relabel_configs:
+      - source_labels: [__address__]
+        target_label: __param_target
+    static_configs:
+      - targets:
+          - http://bad
diff -pruN 2.31.2+ds1-1/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml 2.33.5+ds1-2/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml
--- 2.31.2+ds1-1/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/testdata/url_in_scrape_targetgroup_with_relabel_config.good.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,10 @@
+scrape_configs:
+  - job_name: prometheus
+    relabel_configs:
+      - source_labels: [__address__]
+        target_label: __param_target
+      - target_label: __address__
+        replacement: good
+    static_configs:
+      - targets:
+          - http://good
diff -pruN 2.31.2+ds1-1/cmd/promtool/tsdb.go 2.33.5+ds1-2/cmd/promtool/tsdb.go
--- 2.31.2+ds1-1/cmd/promtool/tsdb.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/tsdb.go	2022-03-08 16:34:32.000000000 +0000
@@ -17,7 +17,6 @@ import (
 	"bufio"
 	"context"
 	"fmt"
-	"github.com/prometheus/prometheus/tsdb/index"
 	"io"
 	"io/ioutil"
 	"math"
@@ -32,11 +31,14 @@ import (
 	"text/tabwriter"
 	"time"
 
+	"github.com/prometheus/prometheus/storage"
+	"github.com/prometheus/prometheus/tsdb/index"
+
 	"github.com/alecthomas/units"
 	"github.com/go-kit/log"
 	"github.com/pkg/errors"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/tsdb"
 	"github.com/prometheus/prometheus/tsdb/chunks"
 	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
@@ -78,7 +80,7 @@ func benchmarkWrite(outPath, samplesFile
 	if err := os.RemoveAll(b.outPath); err != nil {
 		return err
 	}
-	if err := os.MkdirAll(b.outPath, 0777); err != nil {
+	if err := os.MkdirAll(b.outPath, 0o777); err != nil {
 		return err
 	}
 
@@ -187,7 +189,7 @@ func (b *writeBenchmark) ingestScrapesSh
 	type sample struct {
 		labels labels.Labels
 		value  int64
-		ref    *uint64
+		ref    *storage.SeriesRef
 	}
 
 	scrape := make([]*sample, 0, len(lbls))
@@ -207,7 +209,7 @@ func (b *writeBenchmark) ingestScrapesSh
 		for _, s := range scrape {
 			s.value += 1000
 
-			var ref uint64
+			var ref storage.SeriesRef
 			if s.ref != nil {
 				ref = *s.ref
 			}
@@ -589,7 +591,7 @@ func analyzeCompaction(block tsdb.BlockR
 	histogram := make([]int, nBuckets)
 	totalChunks := 0
 	for postingsr.Next() {
-		var lbsl = labels.Labels{}
+		lbsl := labels.Labels{}
 		var chks []chunks.Meta
 		if err := indexr.Series(postingsr.At(), &lbsl, &chks); err != nil {
 			return err
@@ -671,14 +673,14 @@ func checkErr(err error) int {
 	return 0
 }
 
-func backfillOpenMetrics(path string, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
+func backfillOpenMetrics(path, outputDir string, humanReadable, quiet bool, maxBlockDuration time.Duration) int {
 	inputFile, err := fileutil.OpenMmapFile(path)
 	if err != nil {
 		return checkErr(err)
 	}
 	defer inputFile.Close()
 
-	if err := os.MkdirAll(outputDir, 0777); err != nil {
+	if err := os.MkdirAll(outputDir, 0o777); err != nil {
 		return checkErr(errors.Wrap(err, "create output dir"))
 	}
 
diff -pruN 2.31.2+ds1-1/cmd/promtool/unittest.go 2.33.5+ds1-2/cmd/promtool/unittest.go
--- 2.31.2+ds1-1/cmd/promtool/unittest.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/unittest.go	2022-03-08 16:34:32.000000000 +0000
@@ -30,7 +30,7 @@ import (
 	"github.com/prometheus/common/model"
 	yaml "gopkg.in/yaml.v2"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/rules"
@@ -47,6 +47,7 @@ func RulesUnitTest(queryOpts promql.Lazy
 			fmt.Fprintln(os.Stderr, "  FAILED:")
 			for _, e := range errs {
 				fmt.Fprintln(os.Stderr, e.Error())
+				fmt.Println()
 			}
 			failed = true
 		} else {
@@ -55,9 +56,9 @@ func RulesUnitTest(queryOpts promql.Lazy
 		fmt.Println()
 	}
 	if failed {
-		return 1
+		return failureExitCode
 	}
-	return 0
+	return successExitCode
 }
 
 func ruleUnitTest(filename string, queryOpts promql.LazyLoaderOpts) []error {
@@ -313,30 +314,18 @@ func (tg *testGroup) test(evalInterval t
 					})
 				}
 
-				var sb strings.Builder
-				if gotAlerts.Len() != expAlerts.Len() {
-					if tg.TestGroupName != "" {
-						fmt.Fprintf(&sb, "    name: %s,\n", tg.TestGroupName)
-					}
-					fmt.Fprintf(&sb, "    alertname:%s, time:%s, \n", testcase.Alertname, testcase.EvalTime.String())
-					fmt.Fprintf(&sb, "        exp:%#v, \n", expAlerts.String())
-					fmt.Fprintf(&sb, "        got:%#v", gotAlerts.String())
-
-					errs = append(errs, errors.New(sb.String()))
-				} else {
-					sort.Sort(gotAlerts)
-					sort.Sort(expAlerts)
-
-					if !reflect.DeepEqual(expAlerts, gotAlerts) {
-						if tg.TestGroupName != "" {
-							fmt.Fprintf(&sb, "    name: %s,\n", tg.TestGroupName)
-						}
-						fmt.Fprintf(&sb, "    alertname:%s, time:%s, \n", testcase.Alertname, testcase.EvalTime.String())
-						fmt.Fprintf(&sb, "        exp:%#v, \n", expAlerts.String())
-						fmt.Fprintf(&sb, "        got:%#v", gotAlerts.String())
+				sort.Sort(gotAlerts)
+				sort.Sort(expAlerts)
 
-						errs = append(errs, errors.New(sb.String()))
+				if !reflect.DeepEqual(expAlerts, gotAlerts) {
+					var testName string
+					if tg.TestGroupName != "" {
+						testName = fmt.Sprintf("    name: %s,\n", tg.TestGroupName)
 					}
+					expString := indentLines(expAlerts.String(), "            ")
+					gotString := indentLines(gotAlerts.String(), "            ")
+					errs = append(errs, errors.Errorf("%s    alertname: %s, time: %s, \n        exp:%v, \n        got:%v",
+						testName, testcase.Alertname, testcase.EvalTime.String(), expString, gotString))
 				}
 			}
 
@@ -385,7 +374,7 @@ Outer:
 			return labels.Compare(gotSamples[i].Labels, gotSamples[j].Labels) <= 0
 		})
 		if !reflect.DeepEqual(expSamples, gotSamples) {
-			errs = append(errs, errors.Errorf("    expr: %q, time: %s,\n        exp:%#v\n        got:%#v", testCase.Expr,
+			errs = append(errs, errors.Errorf("    expr: %q, time: %s,\n        exp: %v\n        got: %v", testCase.Expr,
 				testCase.EvalTime.String(), parsedSamplesString(expSamples), parsedSamplesString(gotSamples)))
 		}
 	}
@@ -398,7 +387,6 @@ Outer:
 
 // seriesLoadingString returns the input series in PromQL notation.
 func (tg *testGroup) seriesLoadingString() string {
-
 	result := fmt.Sprintf("load %v\n", shortDuration(tg.Interval))
 	for _, is := range tg.InputSeries {
 		result += fmt.Sprintf("  %v %v\n", is.Series, is.Values)
@@ -468,6 +456,23 @@ func query(ctx context.Context, qs strin
 	}
 }
 
+// indentLines prefixes each line in the supplied string with the given "indent"
+// string.
+func indentLines(lines, indent string) string {
+	sb := strings.Builder{}
+	n := strings.Split(lines, "\n")
+	for i, l := range n {
+		if i > 0 {
+			sb.WriteString(indent)
+		}
+		sb.WriteString(l)
+		if i != len(n)-1 {
+			sb.WriteRune('\n')
+		}
+	}
+	return sb.String()
+}
+
 type labelsAndAnnotations []labelAndAnnotation
 
 func (la labelsAndAnnotations) Len() int      { return len(la) }
@@ -484,11 +489,11 @@ func (la labelsAndAnnotations) String()
 	if len(la) == 0 {
 		return "[]"
 	}
-	s := "[" + la[0].String()
-	for _, l := range la[1:] {
-		s += ", " + l.String()
+	s := "[\n0:" + indentLines("\n"+la[0].String(), "  ")
+	for i, l := range la[1:] {
+		s += ",\n" + fmt.Sprintf("%d", i+1) + ":" + indentLines("\n"+l.String(), "  ")
 	}
-	s += "]"
+	s += "\n]"
 
 	return s
 }
@@ -499,7 +504,7 @@ type labelAndAnnotation struct {
 }
 
 func (la *labelAndAnnotation) String() string {
-	return "Labels:" + la.Labels.String() + " Annotations:" + la.Annotations.String()
+	return "Labels:" + la.Labels.String() + "\nAnnotations:" + la.Annotations.String()
 }
 
 type series struct {
diff -pruN 2.31.2+ds1-1/cmd/promtool/unittest_test.go 2.33.5+ds1-2/cmd/promtool/unittest_test.go
--- 2.31.2+ds1-1/cmd/promtool/unittest_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/cmd/promtool/unittest_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -37,6 +37,13 @@ func TestRulesUnitTest(t *testing.T) {
 			want: 0,
 		},
 		{
+			name: "Long evaluation interval",
+			args: args{
+				files: []string{"./testdata/long-period.yml"},
+			},
+			want: 0,
+		},
+		{
 			name: "Bad input series",
 			args: args{
 				files: []string{"./testdata/bad-input-series.yml"},
diff -pruN 2.31.2+ds1-1/config/config.go 2.33.5+ds1-2/config/config.go
--- 2.31.2+ds1-1/config/config.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/config/config.go	2022-03-08 16:34:32.000000000 +0000
@@ -33,8 +33,8 @@ import (
 	yaml "gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/discovery"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/relabel"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
 )
 
 var (
@@ -99,7 +99,7 @@ func Load(s string, expandExternalLabels
 }
 
 // LoadFile parses the given YAML file into a Config.
-func LoadFile(filename string, expandExternalLabels bool, logger log.Logger) (*Config, error) {
+func LoadFile(filename string, agentMode, expandExternalLabels bool, logger log.Logger) (*Config, error) {
 	content, err := ioutil.ReadFile(filename)
 	if err != nil {
 		return nil, err
@@ -108,6 +108,25 @@ func LoadFile(filename string, expandExt
 	if err != nil {
 		return nil, errors.Wrapf(err, "parsing YAML file %s", filename)
 	}
+
+	if agentMode {
+		if len(cfg.RemoteWriteConfigs) == 0 {
+			return nil, errors.New("at least one remote_write target must be specified in agent mode")
+		}
+
+		if len(cfg.AlertingConfig.AlertmanagerConfigs) > 0 || len(cfg.AlertingConfig.AlertRelabelConfigs) > 0 {
+			return nil, errors.New("field alerting is not allowed in agent mode")
+		}
+
+		if len(cfg.RuleFiles) > 0 {
+			return nil, errors.New("field rule_files is not allowed in agent mode")
+		}
+
+		if len(cfg.RemoteReadConfigs) > 0 {
+			return nil, errors.New("field remote_read is not allowed in agent mode")
+		}
+	}
+
 	cfg.SetDirectory(filepath.Dir(filename))
 	return cfg, nil
 }
@@ -169,7 +188,7 @@ var (
 
 		// Backoff times for retrying a batch of samples on recoverable errors.
 		MinBackoff: model.Duration(30 * time.Millisecond),
-		MaxBackoff: model.Duration(100 * time.Millisecond),
+		MaxBackoff: model.Duration(5 * time.Second),
 	}
 
 	// DefaultMetadataConfig is the default metadata configuration for a remote write endpoint.
diff -pruN 2.31.2+ds1-1/config/config_test.go 2.33.5+ds1-2/config/config_test.go
--- 2.31.2+ds1-1/config/config_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/config/config_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -52,8 +52,8 @@ import (
 	"github.com/prometheus/prometheus/discovery/uyuni"
 	"github.com/prometheus/prometheus/discovery/xds"
 	"github.com/prometheus/prometheus/discovery/zookeeper"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/relabel"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
 )
 
 func mustParseURL(u string) *config.URL {
@@ -227,7 +227,6 @@ var expectedConf = &Config{
 			},
 		},
 		{
-
 			JobName: "service-x",
 
 			HonorTimestamps: true,
@@ -784,17 +783,19 @@ var expectedConf = &Config{
 			Scheme:           DefaultScrapeConfig.Scheme,
 			HTTPClientConfig: config.DefaultHTTPClientConfig,
 
-			ServiceDiscoveryConfigs: discovery.Configs{&openstack.SDConfig{
-				Role:            "instance",
-				Region:          "RegionOne",
-				Port:            80,
-				Availability:    "public",
-				RefreshInterval: model.Duration(60 * time.Second),
-				TLSConfig: config.TLSConfig{
-					CAFile:   "testdata/valid_ca_file",
-					CertFile: "testdata/valid_cert_file",
-					KeyFile:  "testdata/valid_key_file",
-				}},
+			ServiceDiscoveryConfigs: discovery.Configs{
+				&openstack.SDConfig{
+					Role:            "instance",
+					Region:          "RegionOne",
+					Port:            80,
+					Availability:    "public",
+					RefreshInterval: model.Duration(60 * time.Second),
+					TLSConfig: config.TLSConfig{
+						CAFile:   "testdata/valid_ca_file",
+						CertFile: "testdata/valid_cert_file",
+						KeyFile:  "testdata/valid_key_file",
+					},
+				},
 			},
 		},
 		{
@@ -808,22 +809,23 @@ var expectedConf = &Config{
 			Scheme:           DefaultScrapeConfig.Scheme,
 			HTTPClientConfig: config.DefaultHTTPClientConfig,
 
-			ServiceDiscoveryConfigs: discovery.Configs{&puppetdb.SDConfig{
-				URL:               "https://puppetserver/",
-				Query:             "resources { type = \"Package\" and title = \"httpd\" }",
-				IncludeParameters: true,
-				Port:              80,
-				RefreshInterval:   model.Duration(60 * time.Second),
-				HTTPClientConfig: config.HTTPClientConfig{
-					FollowRedirects: true,
-					TLSConfig: config.TLSConfig{
-						CAFile:   "testdata/valid_ca_file",
-						CertFile: "testdata/valid_cert_file",
-						KeyFile:  "testdata/valid_key_file",
+			ServiceDiscoveryConfigs: discovery.Configs{
+				&puppetdb.SDConfig{
+					URL:               "https://puppetserver/",
+					Query:             "resources { type = \"Package\" and title = \"httpd\" }",
+					IncludeParameters: true,
+					Port:              80,
+					RefreshInterval:   model.Duration(60 * time.Second),
+					HTTPClientConfig: config.HTTPClientConfig{
+						FollowRedirects: true,
+						TLSConfig: config.TLSConfig{
+							CAFile:   "testdata/valid_ca_file",
+							CertFile: "testdata/valid_cert_file",
+							KeyFile:  "testdata/valid_key_file",
+						},
 					},
 				},
 			},
-			},
 		},
 		{
 			JobName:         "hetzner",
@@ -951,7 +953,7 @@ var expectedConf = &Config{
 			Scheme:           DefaultScrapeConfig.Scheme,
 			ServiceDiscoveryConfigs: discovery.Configs{
 				&uyuni.SDConfig{
-					Server:          kubernetesSDHostURL(),
+					Server:          "https://localhost:1234",
 					Username:        "gopher",
 					Password:        "hole",
 					Entitlement:     "monitoring_entitled",
@@ -986,7 +988,7 @@ var expectedConf = &Config{
 }
 
 func TestYAMLRoundtrip(t *testing.T) {
-	want, err := LoadFile("testdata/roundtrip.good.yml", false, log.NewNopLogger())
+	want, err := LoadFile("testdata/roundtrip.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -999,7 +1001,7 @@ func TestYAMLRoundtrip(t *testing.T) {
 }
 
 func TestRemoteWriteRetryOnRateLimit(t *testing.T) {
-	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, log.NewNopLogger())
+	want, err := LoadFile("testdata/remote_write_retry_on_rate_limit.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 
 	out, err := yaml.Marshal(want)
@@ -1015,16 +1017,16 @@ func TestRemoteWriteRetryOnRateLimit(t *
 func TestLoadConfig(t *testing.T) {
 	// Parse a valid file that sets a global scrape timeout. This tests whether parsing
 	// an overwritten default field in the global config permanently changes the default.
-	_, err := LoadFile("testdata/global_timeout.good.yml", false, log.NewNopLogger())
+	_, err := LoadFile("testdata/global_timeout.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 
-	c, err := LoadFile("testdata/conf.good.yml", false, log.NewNopLogger())
+	c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, expectedConf, c)
 }
 
 func TestScrapeIntervalLarger(t *testing.T) {
-	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, log.NewNopLogger())
+	c, err := LoadFile("testdata/scrape_interval_larger.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, 1, len(c.ScrapeConfigs))
 	for _, sc := range c.ScrapeConfigs {
@@ -1034,7 +1036,7 @@ func TestScrapeIntervalLarger(t *testing
 
 // YAML marshaling must not reveal authentication credentials.
 func TestElideSecrets(t *testing.T) {
-	c, err := LoadFile("testdata/conf.good.yml", false, log.NewNopLogger())
+	c, err := LoadFile("testdata/conf.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 
 	secretRe := regexp.MustCompile(`\\u003csecret\\u003e|<secret>`)
@@ -1051,31 +1053,31 @@ func TestElideSecrets(t *testing.T) {
 
 func TestLoadConfigRuleFilesAbsolutePath(t *testing.T) {
 	// Parse a valid file that sets a rule files with an absolute path
-	c, err := LoadFile(ruleFilesConfigFile, false, log.NewNopLogger())
+	c, err := LoadFile(ruleFilesConfigFile, false, false, log.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, ruleFilesExpectedConf, c)
 }
 
 func TestKubernetesEmptyAPIServer(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 }
 
 func TestKubernetesWithKubeConfig(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 }
 
 func TestKubernetesSelectors(t *testing.T) {
-	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, log.NewNopLogger())
+	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
-	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, log.NewNopLogger())
+	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 }
 
@@ -1086,170 +1088,232 @@ var expectedErrors = []struct {
 	{
 		filename: "jobname.bad.yml",
 		errMsg:   `job_name is empty`,
-	}, {
+	},
+	{
 		filename: "jobname_dup.bad.yml",
 		errMsg:   `found multiple scrape configs with job name "prometheus"`,
-	}, {
+	},
+	{
 		filename: "scrape_interval.bad.yml",
 		errMsg:   `scrape timeout greater than scrape interval`,
-	}, {
+	},
+	{
 		filename: "labelname.bad.yml",
 		errMsg:   `"not$allowed" is not a valid label name`,
-	}, {
+	},
+	{
 		filename: "labelname2.bad.yml",
 		errMsg:   `"not:allowed" is not a valid label name`,
-	}, {
+	},
+	{
 		filename: "labelvalue.bad.yml",
 		errMsg:   `"\xff" is not a valid label value`,
-	}, {
+	},
+	{
 		filename: "regex.bad.yml",
 		errMsg:   "error parsing regexp",
-	}, {
+	},
+	{
 		filename: "modulus_missing.bad.yml",
 		errMsg:   "relabel configuration for hashmod requires non-zero modulus",
-	}, {
+	},
+	{
 		filename: "labelkeep.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep2.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep3.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep4.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelkeep5.bad.yml",
 		errMsg:   "labelkeep action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop2.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop3.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop4.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labeldrop5.bad.yml",
 		errMsg:   "labeldrop action requires only 'regex', and no other fields",
-	}, {
+	},
+	{
 		filename: "labelmap.bad.yml",
 		errMsg:   "\"l-$1\" is invalid 'replacement' for labelmap action",
-	}, {
+	},
+	{
 		filename: "rules.bad.yml",
 		errMsg:   "invalid rule file path",
-	}, {
+	},
+	{
 		filename: "unknown_attr.bad.yml",
 		errMsg:   "field consult_sd_configs not found in type",
-	}, {
+	},
+	{
 		filename: "bearertoken.bad.yml",
 		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_http_config_without_api_server.bad.yml",
 		errMsg:   "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",
-	}, {
+	},
+	{
+		filename: "kubernetes_kubeconfig_with_own_namespace.bad.yml",
+		errMsg:   "cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously",
+	},
+	{
+		filename: "kubernetes_api_server_with_own_namespace.bad.yml",
+		errMsg:   "cannot use 'api_server' and 'namespaces.own_namespace' simultaneously",
+	},
+	{
 		filename: "kubernetes_kubeconfig_with_apiserver.bad.yml",
 		errMsg:   "cannot use 'kubeconfig_file' and 'api_server' simultaneously",
-	}, {
+	},
+	{
 		filename: "kubernetes_kubeconfig_with_http_config.bad.yml",
 		errMsg:   "cannot use a custom HTTP client configuration together with 'kubeconfig_file'",
 	},
 	{
 		filename: "kubernetes_bearertoken.bad.yml",
 		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_role.bad.yml",
 		errMsg:   "role",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_endpoints.bad.yml",
 		errMsg:   "endpoints role supports only pod, service, endpoints selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_ingress.bad.yml",
 		errMsg:   "ingress role supports only ingress selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_node.bad.yml",
 		errMsg:   "node role supports only node selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_pod.bad.yml",
 		errMsg:   "pod role supports only pod selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_service.bad.yml",
 		errMsg:   "service role supports only service selectors",
-	}, {
+	},
+	{
 		filename: "kubernetes_namespace_discovery.bad.yml",
 		errMsg:   "field foo not found in type kubernetes.plain",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_duplicated_role.bad.yml",
 		errMsg:   "duplicated selector role: pod",
-	}, {
+	},
+	{
 		filename: "kubernetes_selectors_incorrect_selector.bad.yml",
 		errMsg:   "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
-	}, {
+	},
+	{
 		filename: "kubernetes_bearertoken_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "kubernetes_authorization_basicauth.bad.yml",
 		errMsg:   "at most one of basic_auth, oauth2 & authorization must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_no_servers.bad.yml",
 		errMsg:   "marathon_sd: must contain at least one Marathon server",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_authtokenfile.bad.yml",
 		errMsg:   "marathon_sd: at most one of auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_basicauth.bad.yml",
 		errMsg:   "marathon_sd: at most one of basic_auth, auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_bearertoken.bad.yml",
 		errMsg:   "marathon_sd: at most one of bearer_token, bearer_token_file, auth_token & auth_token_file must be configured",
-	}, {
+	},
+	{
 		filename: "marathon_authtoken_authorization.bad.yml",
 		errMsg:   "marathon_sd: at most one of auth_token, auth_token_file & authorization must be configured",
-	}, {
+	},
+	{
 		filename: "openstack_role.bad.yml",
 		errMsg:   "unknown OpenStack SD role",
-	}, {
+	},
+	{
 		filename: "openstack_availability.bad.yml",
 		errMsg:   "unknown availability invalid, must be one of admin, internal or public",
-	}, {
+	},
+	{
 		filename: "url_in_targetgroup.bad.yml",
 		errMsg:   "\"http://bad\" is not a valid hostname",
-	}, {
+	},
+	{
 		filename: "target_label_missing.bad.yml",
 		errMsg:   "relabel configuration for replace action requires 'target_label' value",
-	}, {
+	},
+	{
 		filename: "target_label_hashmod_missing.bad.yml",
 		errMsg:   "relabel configuration for hashmod action requires 'target_label' value",
-	}, {
+	},
+	{
 		filename: "unknown_global_attr.bad.yml",
 		errMsg:   "field nonexistent_field not found in type config.plain",
-	}, {
+	},
+	{
 		filename: "remote_read_url_missing.bad.yml",
 		errMsg:   `url for remote_read is empty`,
-	}, {
+	},
+	{
 		filename: "remote_write_header.bad.yml",
 		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
-	}, {
+	},
+	{
 		filename: "remote_read_header.bad.yml",
 		errMsg:   `x-prometheus-remote-write-version is a reserved header. It must not be changed`,
-	}, {
+	},
+	{
 		filename: "remote_write_authorization_header.bad.yml",
 		errMsg:   `authorization header must be changed via the basic_auth, authorization, oauth2, or sigv4 parameter`,
-	}, {
+	},
+	{
 		filename: "remote_write_url_missing.bad.yml",
 		errMsg:   `url for remote_write is empty`,
-	}, {
+	},
+	{
 		filename: "remote_write_dup.bad.yml",
 		errMsg:   `found multiple remote write configs with job name "queue1"`,
-	}, {
+	},
+	{
 		filename: "remote_read_dup.bad.yml",
 		errMsg:   `found multiple remote read configs with job name "queue1"`,
 	},
@@ -1377,11 +1441,15 @@ var expectedErrors = []struct {
 		filename: "empty_scrape_config_action.bad.yml",
 		errMsg:   "relabel action cannot be empty",
 	},
+	{
+		filename: "uyuni_no_server.bad.yml",
+		errMsg:   "Uyuni SD configuration requires server host",
+	},
 }
 
 func TestBadConfigs(t *testing.T) {
 	for _, ee := range expectedErrors {
-		_, err := LoadFile("testdata/"+ee.filename, false, log.NewNopLogger())
+		_, err := LoadFile("testdata/"+ee.filename, false, false, log.NewNopLogger())
 		require.Error(t, err, "%s", ee.filename)
 		require.Contains(t, err.Error(), ee.errMsg,
 			"Expected error for %s to contain %q but got: %s", ee.filename, ee.errMsg, err)
@@ -1415,20 +1483,20 @@ func TestExpandExternalLabels(t *testing
 	// Cleanup ant TEST env variable that could exist on the system.
 	os.Setenv("TEST", "")
 
-	c, err := LoadFile("testdata/external_labels.good.yml", false, log.NewNopLogger())
+	c, err := LoadFile("testdata/external_labels.good.yml", false, false, log.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
 	require.Equal(t, labels.Label{Name: "baz", Value: "foo${TEST}bar"}, c.GlobalConfig.ExternalLabels[1])
 	require.Equal(t, labels.Label{Name: "foo", Value: "${TEST}"}, c.GlobalConfig.ExternalLabels[2])
 
-	c, err = LoadFile("testdata/external_labels.good.yml", true, log.NewNopLogger())
+	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
 	require.Equal(t, labels.Label{Name: "baz", Value: "foobar"}, c.GlobalConfig.ExternalLabels[1])
 	require.Equal(t, labels.Label{Name: "foo", Value: ""}, c.GlobalConfig.ExternalLabels[2])
 
 	os.Setenv("TEST", "TestValue")
-	c, err = LoadFile("testdata/external_labels.good.yml", true, log.NewNopLogger())
+	c, err = LoadFile("testdata/external_labels.good.yml", false, true, log.NewNopLogger())
 	require.NoError(t, err)
 	require.Equal(t, labels.Label{Name: "bar", Value: "foo"}, c.GlobalConfig.ExternalLabels[0])
 	require.Equal(t, labels.Label{Name: "baz", Value: "fooTestValuebar"}, c.GlobalConfig.ExternalLabels[1])
diff -pruN 2.31.2+ds1-1/config/testdata/kubernetes_api_server_with_own_namespace.bad.yml 2.33.5+ds1-2/config/testdata/kubernetes_api_server_with_own_namespace.bad.yml
--- 2.31.2+ds1-1/config/testdata/kubernetes_api_server_with_own_namespace.bad.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/config/testdata/kubernetes_api_server_with_own_namespace.bad.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,7 @@
+scrape_configs:
+  - job_name: prometheus
+    kubernetes_sd_configs:
+      - role: endpoints
+        api_server: 'https://localhost:1234'
+        namespaces:
+          own_namespace: true
diff -pruN 2.31.2+ds1-1/config/testdata/kubernetes_kubeconfig_with_own_namespace.bad.yml 2.33.5+ds1-2/config/testdata/kubernetes_kubeconfig_with_own_namespace.bad.yml
--- 2.31.2+ds1-1/config/testdata/kubernetes_kubeconfig_with_own_namespace.bad.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/config/testdata/kubernetes_kubeconfig_with_own_namespace.bad.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,7 @@
+scrape_configs:
+  - job_name: prometheus
+    kubernetes_sd_configs:
+      - role: endpoints
+        kubeconfig_file: /home/User1/.kubeconfig
+        namespaces:
+          own_namespace: true
diff -pruN 2.31.2+ds1-1/config/testdata/uyuni_no_server.bad.yml 2.33.5+ds1-2/config/testdata/uyuni_no_server.bad.yml
--- 2.31.2+ds1-1/config/testdata/uyuni_no_server.bad.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/config/testdata/uyuni_no_server.bad.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,4 @@
+scrape_configs:
+  - job_name: uyuni
+    uyuni_sd_configs:
+      - server:
diff -pruN 2.31.2+ds1-1/debian/changelog 2.33.5+ds1-2/debian/changelog
--- 2.31.2+ds1-1/debian/changelog	2022-01-22 02:07:45.000000000 +0000
+++ 2.33.5+ds1-2/debian/changelog	2022-03-23 22:28:20.000000000 +0000
@@ -1,3 +1,29 @@
+prometheus (2.33.5+ds1-2) unstable; urgency=medium
+
+  * New patch: Avoid literal integer overflows in 32 bit arches.
+  * debian/rules: Avoid test failures due to newer crypto/x509
+    requirements.
+
+ -- Martina Ferrari <tina@debian.org>  Wed, 23 Mar 2022 22:28:20 +0000
+
+prometheus (2.33.5+ds1-1) unstable; urgency=medium
+
+  * New upstream release.
+  * debian/gbp.conf: Avoid re-creating orig tarball.
+  * debian/copyright: Exclude more files from React UI.
+  * Refresh and reorganise patches.
+    - Removed 11-Set_temporary_storage_path_for_tsdb.patch as the fix was
+      implemented upstream.
+    - Renamed and split 01-Do_not_embed_blobs.patch into
+      11-Disable_react_UI.patch and 12-Do_not_embed_blobs.patch to separate
+      concerns and reflect actual patch order.
+  * debian/patches: Add classic UI page for agent mode.
+  * debian/rules: Update test data paths.
+  * debian/default: Update available flags and descriptions.
+  * debian/copyright: Update years.
+
+ -- Martina Ferrari <tina@debian.org>  Wed, 16 Mar 2022 06:03:09 +0000
+
 prometheus (2.31.2+ds1-1) unstable; urgency=medium
 
   * Team upload.
diff -pruN 2.31.2+ds1-1/debian/copyright 2.33.5+ds1-2/debian/copyright
--- 2.31.2+ds1-1/debian/copyright	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/copyright	2022-03-23 22:28:20.000000000 +0000
@@ -7,6 +7,10 @@ Comment: Upstream embeds many dependenci
  rest was left embedded.
 Files-Excluded:
  vendor
+ web/ui/build_ui.sh
+ web/ui/module
+ web/ui/package.json
+ web/ui/package-lock.json
  web/ui/react-app
  web/ui/static/vendor/bootstrap-4.5.2
  web/ui/static/vendor/bootstrap3-typeahead/bootstrap3-typeahead.min.js
@@ -22,12 +26,12 @@ Files-Excluded:
  web/ui/static/vendor/rickshaw
 
 Files: *
-Copyright: 2012-2020 The Prometheus Authors
+Copyright: 2012-2022 The Prometheus Authors
 License: Apache-2.0
 
 Files: debian/*
-Copyright: 2015 Martina Ferrari <tina@debian.org>
-           2020-2021 Sipwise GmbH, Austria
+Copyright: 2015-2022 Martina Ferrari <tina@debian.org>
+           2020-2022 Sipwise GmbH, Austria
 License: Apache-2.0
 
 Files: web/ui/static/vendor/js/jquery.selection.js
diff -pruN 2.31.2+ds1-1/debian/default 2.33.5+ds1-2/debian/default
--- 2.31.2+ds1-1/debian/default	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/default	2022-03-23 22:28:20.000000000 +0000
@@ -6,85 +6,18 @@ ARGS=""
 
 # prometheus supports the following options:
 #
-#  --alertmanager.notification-queue-capacity=10000
-#    The capacity of the queue for pending Alertmanager notifications.
 #  --config.file="/etc/prometheus/prometheus.yml"
 #    Prometheus configuration file path.
-#  --enable-feature=<feature,...>
-#    Comma separated feature names to enable. Valid options:
-#    exemplar-storage, expand-external-labels, memory-snapshot-on-shutdown,
-#    promql-at-modifier, promql-negative-offset, remote-write-receiver,
-#    extra-scrape-metrics, new-service-discovery-manager. See
-#    https://prometheus.io/docs/prometheus/latest/feature_flags/ for more
-#    details.
-#  --log.format=logfmt
-#    Output format of log messages. One of: [logfmt, json].
-#  --log.level=info
-#    Only log messages with the given severity or above. One of: [debug, info,
-#    warn, error].
-#  --query.lookback-delta=5m
-#    The maximum lookback duration for retrieving metrics during expression
-#    evaluations and federation.
-#  --query.max-concurrency=20
-#    Maximum number of queries executed concurrently.
-#  --query.max-samples=50000000
-#    Maximum number of samples a single query can load into memory. Note that
-#    queries will fail if they try to load more samples than this into memory,
-#    so this also limits the number of samples a query can return.
-#  --query.timeout=2m
-#    Maximum time a query may take before being aborted.
-#  --rules.alert.for-grace-period=10m
-#    Minimum duration between alert and restored "for" state. This is
-#    maintained only for alerts with configured "for" time greater than grace
-#    period.
-#  --rules.alert.for-outage-tolerance=1h
-#    Max time to tolerate prometheus outage for restoring "for" state of alert.
-#  --rules.alert.resend-delay=1m
-#    Minimum amount of time to wait before resending an alert to Alertmanager.
-#  --storage.remote.flush-deadline=<duration>
-#    How long to wait flushing sample on shutdown or config reload.
-#  --storage.remote.read-concurrent-limit=10
-#    Maximum number of concurrent remote read calls. 0 means no limit.
-#  --storage.remote.read-max-bytes-in-frame=1048576
-#    Maximum number of bytes in a single frame for streaming remote read
-#    response types before marshalling. Note that client might have limit on
-#    frame size as well. 1MB as recommended by protobuf by default.
-#  --storage.remote.read-sample-limit=5e7
-#    Maximum overall number of samples to return via the remote read interface,
-#    in a single query. 0 means no limit. This limit is ignored for streamed
-#    response types.
-#  --storage.tsdb.allow-overlapping-blocks
-#    Allow overlapping blocks, which in turn enables vertical compaction and
-#    vertical query merge.
-#  --storage.tsdb.path="/var/lib/prometheus/metrics2/"
-#    Base path for metrics storage.
-#  --storage.tsdb.retention.size=STORAGE.TSDB.RETENTION.SIZE
-#    Maximum number of bytes that can be stored for blocks. A unit is required,
-#    supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB".
-#  --storage.tsdb.retention.time=STORAGE.TSDB.RETENTION.TIME
-#    How long to retain samples in storage. When this flag is set it overrides
-#    "storage.tsdb.retention". If neither this flag nor
-#    "storage.tsdb.retention" nor "storage.tsdb.retention.size" is set, the
-#    retention time defaults to 15d. Units Supported: y, w, d, h, m, s, ms.
-#  --storage.tsdb.retention=STORAGE.TSDB.RETENTION
-#    [DEPRECATED] How long to retain samples in storage. This flag has been
-#    deprecated, use "storage.tsdb.retention.time" instead.
-#  --storage.tsdb.use-lockfile
-#    Create a lockfile in data directory.
+#  --web.listen-address="0.0.0.0:9090"
+#    Address to listen on for UI, API, and telemetry.
 #  --web.config.file=""
 #    [EXPERIMENTAL] Path to configuration file that can enable TLS or
 #    authentication.
-#  --web.console.libraries="/etc/prometheus/console_libraries"
-#    Path to the console library directory.
-#  --web.console.templates="/etc/prometheus/consoles"
-#    Path to the console template directory, available at /consoles.
-#  --web.cors.origin=".*"
-#    Regex for CORS origin. It is fully anchored. Example:
-#    'https?://(domain1|domain2)\.com'.
-#  --web.enable-admin-api
-#    Enable API endpoints for admin control actions.
-#  --web.enable-lifecycle
-#    Enable shutdown and reload via HTTP request.
+#  --web.read-timeout=5m
+#    Maximum duration before timing out read of the request, and closing idle
+#    connections.
+#  --web.max-connections=512
+#    Maximum number of simultaneous connections.
 #  --web.external-url=<URL>
 #    The URL under which Prometheus is externally reachable (for example, if
 #    Prometheus is served via a reverse proxy). Used for generating relative
@@ -92,19 +25,125 @@ ARGS=""
 #    portion, it will be used to prefix all HTTP endpoints served by
 #    Prometheus. If omitted, relevant URL components will be derived
 #    automatically.
-#  --web.listen-address="0.0.0.0:9090"
-#    Address to listen on for UI, API, and telemetry.
-#  --web.local-assets="/usr/share/prometheus/web/"
-#    Path to static asset/templates directory.
-#  --web.max-connections=512
-#    Maximum number of simultaneous connections.
-#  --web.page-title="Prometheus Time Series Collection and Processing Server"
-#    Document title of Prometheus instance.
-#  --web.read-timeout=5m
-#    Maximum duration before timing out read of the request, and closing idle
-#    connections.
 #  --web.route-prefix=<path>
 #    Prefix for the internal routes of web endpoints. Defaults to path of
 #    --web.external-url.
+#  --web.local-assets="/usr/share/prometheus/web/"
+#    Path to static asset/templates directory.
 #  --web.user-assets=<path>
 #    Path to user asset directory, available at /user.
+#  --web.enable-lifecycle
+#    Enable shutdown and reload via HTTP request.
+#  --web.enable-admin-api
+#    Enable API endpoints for admin control actions.
+#  --web.enable-remote-write-receiver
+#    Enable API endpoint accepting remote write requests.
+#  --web.console.templates="/etc/prometheus/consoles"
+#    Path to the console template directory, available at /consoles.
+#  --web.console.libraries="/etc/prometheus/console_libraries"
+#    Path to the console library directory.
+#  --web.page-title="Prometheus Time Series Collection and Processing Server"
+#    Document title of Prometheus instance.
+#  --web.cors.origin=".*"
+#    Regex for CORS origin. It is fully anchored.
+#    Example: 'https?://(domain1|domain2)\.com'
+#  --storage.tsdb.path="/var/lib/prometheus/metrics2/"
+#    Base path for metrics storage.
+#    Use with server mode only.
+#  --storage.tsdb.retention=STORAGE.TSDB.RETENTION
+#    [DEPRECATED] How long to retain samples in storage. This flag has been
+#    deprecated, use "storage.tsdb.retention.time" instead.
+#    Use with server mode only.
+#  --storage.tsdb.retention.time=STORAGE.TSDB.RETENTION.TIME
+#    How long to retain samples in storage.
+#    When this flag is set it overrides "storage.tsdb.retention". If neither
+#    this flag nor "storage.tsdb.retention" nor "storage.tsdb.retention.size"
+#    is set, the retention time defaults to 15d.
+#    Units Supported: y, w, d, h, m, s, ms.
+#    Use with server mode only.
+#  --storage.tsdb.retention.size=STORAGE.TSDB.RETENTION.SIZE
+#    Maximum number of bytes that can be stored for blocks. A unit is required,
+#    supported units: B, KB, MB, GB, TB, PB, EB. Ex: "512MB".
+#    Use with server mode only.
+#  --storage.tsdb.use-lockfile
+#    Create a lockfile in data directory.
+#    Use with server mode only.
+#  --storage.tsdb.allow-overlapping-blocks
+#    Allow overlapping blocks, which in turn enables vertical compaction and
+#    vertical query merge.
+#    Use with server mode only.
+#  --storage.agent.path="/var/lib/prometheus/data-agent/"
+#    Base path for metrics storage.
+#    Use with agent mode only.
+#  --storage.agent.wal-compression
+#    Compress the agent WAL.
+#    Use with agent mode only.
+#  --storage.agent.retention.min-time=STORAGE.AGENT.RETENTION.MIN-TIME
+#    Minimum age samples may be before being considered for deletion when the
+#    WAL is truncated.
+#    Use with agent mode only.
+#  --storage.agent.retention.max-time=STORAGE.AGENT.RETENTION.MAX-TIME
+#    Maximum age samples may be before being forcibly deleted when the WAL is
+#    truncated.
+#    Use with agent mode only.
+#  --storage.agent.use-lockfile
+#    Create a lockfile in data directory.
+#    Use with agent mode only.
+#  --storage.remote.flush-deadline=<duration>
+#    How long to wait flushing sample on shutdown or config reload.
+#  --storage.remote.read-sample-limit=5e7
+#    Maximum overall number of samples to return via the remote read interface,
+#    in a single query. 0 means no limit. This limit is ignored for streamed
+#    response types.
+#    Use with server mode only.
+#  --storage.remote.read-concurrent-limit=10
+#    Maximum number of concurrent remote read calls. 0 means no limit.
+#    Use with server mode only.
+#  --storage.remote.read-max-bytes-in-frame=1048576
+#    Maximum number of bytes in a single frame for streaming remote read
+#    response types before marshalling. Note that client might have limit on
+#    frame size as well. 1MB as recommended by protobuf by default.
+#    Use with server mode only.
+#  --rules.alert.for-outage-tolerance=1h
+#    Max time to tolerate prometheus outage for restoring "for" state of alert.
+#    Use with server mode only.
+#  --rules.alert.for-grace-period=10m
+#    Minimum duration between alert and restored "for" state. This is
+#    maintained only for alerts with configured "for" time greater than grace
+#    period.
+#    Use with server mode only.
+#  --rules.alert.resend-delay=1m
+#    Minimum amount of time to wait before resending an alert to Alertmanager.
+#    Use with server mode only.
+#  --alertmanager.notification-queue-capacity=10000
+#    The capacity of the queue for pending Alertmanager notifications.
+#    Use with server mode only.
+#  --query.lookback-delta=5m
+#    The maximum lookback duration for retrieving metrics during expression
+#    evaluations and federation.
+#    Use with server mode only.
+#  --query.timeout=2m
+#    Maximum time a query may take before being aborted.
+#    Use with server mode only.
+#  --query.max-concurrency=20
+#    Maximum number of queries executed concurrently.
+#    Use with server mode only.
+#  --query.max-samples=50000000
+#    Maximum number of samples a single query can load into memory. Note that
+#    queries will fail if they try to load more samples than this into memory,
+#    so this also limits the number of samples a query can return.
+#    Use with server mode only.
+#  --enable-feature=...
+#    Comma separated feature names to enable.
+#    Valid options: agent, exemplar-storage, expand-external-labels,
+#    memory-snapshot-on-shutdown, promql-at-modifier, promql-negative-offset,
+#    remote-write-receiver (DEPRECATED), extra-scrape-metrics,
+#    new-service-discovery-manager.
+#    See https://prometheus.io/docs/prometheus/latest/feature_flags/ for more
+#    details.
+#  --log.level=info
+#    Only log messages with the given severity or above. One of: [debug, info,
+#    warn, error].
+#  --log.format=logfmt
+#    Output format of log messages. One of: [logfmt, json].
+
diff -pruN 2.31.2+ds1-1/debian/gbp.conf 2.33.5+ds1-2/debian/gbp.conf
--- 2.31.2+ds1-1/debian/gbp.conf	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/gbp.conf	2022-03-23 22:28:20.000000000 +0000
@@ -1,3 +1,6 @@
 [DEFAULT]
 debian-branch = debian/sid
+
+[buildpackage]
 dist = DEP14
+no-create-orig = True
diff -pruN 2.31.2+ds1-1/debian/patches/01-Do_not_embed_blobs.patch 2.33.5+ds1-2/debian/patches/01-Do_not_embed_blobs.patch
--- 2.31.2+ds1-1/debian/patches/01-Do_not_embed_blobs.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/01-Do_not_embed_blobs.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,302 +0,0 @@
-From: Martina Ferrari <tina@debian.org>
-Date: Sat, 20 Jun 2020 15:32:33 -0300
-Subject: Do not embed blobs
-
-Avoid embedding blobs into the prometheus binary, instead use files
-installed on disk.
----
----
- cmd/prometheus/main.go      |    3 +
- console_libraries/prom.lib  |   17 ++++-----
- web/ui/doc.go               |    2 +
- web/ui/templates/_base.html |   11 ++----
- web/ui/templates/graph.html |   22 +++++-------
- web/ui/ui.go                |    3 -
- web/web.go                  |   78 +++-----------------------------------------
- web/web_test.go             |    1 
- 8 files changed, 37 insertions(+), 100 deletions(-)
-
---- a/cmd/prometheus/main.go
-+++ b/cmd/prometheus/main.go
-@@ -221,6 +221,9 @@ func main() {
- 		"Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url.").
- 		PlaceHolder("<path>").StringVar(&cfg.web.RoutePrefix)
- 
-+	a.Flag("web.local-assets", "Path to static asset/templates directory.").
-+		Default("/usr/share/prometheus/web/").StringVar(&cfg.web.LocalAssets)
-+
- 	a.Flag("web.user-assets", "Path to user asset directory, available at /user.").
- 		PlaceHolder("<path>").StringVar(&cfg.web.UserAssetsPath)
- 
---- a/console_libraries/prom.lib
-+++ b/console_libraries/prom.lib
-@@ -1,16 +1,15 @@
- {{/* vim: set ft=html: */}}
- {{/* Load Prometheus console library JS/CSS. Should go in <head> */}}
- {{ define "prom_console_head" }}
--<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.css">
--<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css">
-+<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/rickshaw/rickshaw.min.css">
-+<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/bootstrap4/css/bootstrap.min.css">
- <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/css/prom_console.css">
--<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css">
--<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.v3.js"></script>
--<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.layout.min.js"></script>
--<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.js"></script>
--<script src="{{ pathPrefix }}/classic/static/vendor/js/jquery-3.5.1.min.js"></script>
--<script src="{{ pathPrefix }}/classic/static/vendor/js/popper.min.js"></script>
--<script src="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js"></script>
-+<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css">
-+<script src="{{ pathPrefix }}/classic/static/d3.min.js"></script>
-+<script src="{{ pathPrefix }}/classic/static/rickshaw/rickshaw.min.js"></script>
-+<script src="{{ pathPrefix }}/classic/static/jquery/jquery.min.js"></script>
-+<script src="{{ pathPrefix }}/classic/static/popper.js/popper.min.js"></script>
-+<script src="{{ pathPrefix }}/classic/static/bootstrap4/js/bootstrap.min.js"></script>
- 
- <script>
- var PATH_PREFIX = "{{ pathPrefix }}";
---- a/web/ui/templates/graph.html
-+++ b/web/ui/templates/graph.html
-@@ -1,19 +1,18 @@
- {{define "head"}}
--    <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.css?v={{ buildVersion }}">
--    <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.css?v={{ buildVersion }}">
-+    <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/rickshaw/rickshaw.min.css?v={{ buildVersion }}">
-+    <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.css?v={{ buildVersion }}">
- 
--    <script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.v3.js?v={{ buildVersion }}"></script>
--    <script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.layout.min.js?v={{ buildVersion }}"></script>
--    <script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.js?v={{ buildVersion }}"></script>
--    <script src="{{ pathPrefix }}/classic/static/vendor/moment/moment.min.js?v={{ buildVersion }}"></script>
--    <script src="{{ pathPrefix }}/classic/static/vendor/moment/moment-timezone-with-data.min.js?v={{ buildVersion }}"></script>
--    <script src="{{ pathPrefix }}/classic/static/vendor/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.js?v={{ buildVersion }}"></script>
--    <script src="{{ pathPrefix }}/classic/static/vendor/bootstrap3-typeahead/bootstrap3-typeahead.min.js?v={{ buildVersion }}"></script>
-+    <script src="{{ pathPrefix }}/classic/static/d3/d3.min.js?v={{ buildVersion }}"></script>
-+    <script src="{{ pathPrefix }}/classic/static/rickshaw/rickshaw.min.js?v={{ buildVersion }}"></script>
-+    <script src="{{ pathPrefix }}/classic/static/moment/moment.min.js?v={{ buildVersion }}"></script>
-+    <script src="{{ pathPrefix }}/classic/static/moment-timezone/moment-timezone-with-data.min.js?v={{ buildVersion }}"></script>
-+    <script src="{{ pathPrefix }}/classic/static/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.js?v={{ buildVersion }}"></script>
-+    <script src="{{ pathPrefix }}/classic/static/vendor/bootstrap3-typeahead/bootstrap3-typeahead.js?v={{ buildVersion }}"></script>
-     <script src="{{ pathPrefix }}/classic/static/vendor/fuzzy/fuzzy.js?v={{ buildVersion }}"></script>
- 
--    <script src="{{ pathPrefix }}/classic/static/vendor/mustache/mustache.min.js?v={{ buildVersion }}"></script>
-+    <script src="{{ pathPrefix }}/classic/static/mustache/mustache.min.js?v={{ buildVersion }}"></script>
-     <script src="{{ pathPrefix }}/classic/static/vendor/js/jquery.selection.js?v={{ buildVersion }}"></script>
--    <!-- <script src="{{ pathPrefix }}/classic/static/vendor/js/jquery.hotkeys.js?v={{ buildVersion }}"></script> -->
-+    <!-- <script src="{{ pathPrefix }}/classic/static/jquery-hotkeys/jquery.hotkeys.js?v={{ buildVersion }}"></script> -->
- 
-     <script src="{{ pathPrefix }}/classic/static/js/graph/index.js?v={{ buildVersion }}"></script>
- 
-@@ -29,7 +28,6 @@
-           <i class="glyphicon glyphicon-unchecked"></i>
-           <button type="button" class="search-history" title="search previous queries">Enable query history</button>
-         </div>
--        <button type="button" class="btn btn-link btn-sm new_ui_button" onclick="window.location.pathname='{{ pathPrefix }}/graph'">Back to the new UI</button>
-       </div>
-     </div>
- 
---- a/web/web.go
-+++ b/web/web.go
-@@ -14,7 +14,6 @@
- package web
- 
- import (
--	"bytes"
- 	"context"
- 	"encoding/json"
- 	"fmt"
-@@ -49,7 +48,6 @@ import (
- 	io_prometheus_client "github.com/prometheus/client_model/go"
- 	"github.com/prometheus/common/model"
- 	"github.com/prometheus/common/route"
--	"github.com/prometheus/common/server"
- 	toolkit_web "github.com/prometheus/exporter-toolkit/web"
- 	"go.uber.org/atomic"
- 	"golang.org/x/net/netutil"
-@@ -65,23 +63,8 @@ import (
- 	"github.com/prometheus/prometheus/tsdb/index"
- 	"github.com/prometheus/prometheus/util/httputil"
- 	api_v1 "github.com/prometheus/prometheus/web/api/v1"
--	"github.com/prometheus/prometheus/web/ui"
- )
- 
--// Paths that are handled by the React / Reach router that should all be served the main React app's index.html.
--var reactRouterPaths = []string{
--	"/alerts",
--	"/config",
--	"/flags",
--	"/graph",
--	"/rules",
--	"/service-discovery",
--	"/status",
--	"/targets",
--	"/tsdb-status",
--	"/starting",
--}
--
- // withStackTrace logs the stack trace in case the request panics. The function
- // will re-raise the error which will then be handled by the net/http package.
- // It is needed because the go-kit log package doesn't manage properly the
-@@ -237,7 +220,7 @@ type Options struct {
- 	MaxConnections             int
- 	ExternalURL                *url.URL
- 	RoutePrefix                string
--	UseLocalAssets             bool
-+	LocalAssets                string
- 	UserAssetsPath             string
- 	ConsoleTemplatesPath       string
- 	ConsoleLibrariesPath       string
-@@ -346,16 +329,15 @@ func New(logger log.Logger, o *Options)
- 	readyf := h.testReady
- 
- 	router.Get("/", func(w http.ResponseWriter, r *http.Request) {
--		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/graph"), http.StatusFound)
-+		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/classic/graph"), http.StatusFound)
- 	})
- 	router.Get("/classic/", func(w http.ResponseWriter, r *http.Request) {
- 		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/classic/graph"), http.StatusFound)
- 	})
- 
--	// Redirect the original React UI's path (under "/new") to its new path at the root.
--	router.Get("/new/*path", func(w http.ResponseWriter, r *http.Request) {
--		p := route.Param(r.Context(), "path")
--		http.Redirect(w, r, path.Join(o.ExternalURL.Path, p)+"?"+r.URL.RawQuery, http.StatusFound)
-+	// Catch requests to legacy URLs that would try to hit the "new" web UI
-+	router.Get("/graph/", func(w http.ResponseWriter, r *http.Request) {
-+		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/classic/graph")+"?"+r.URL.RawQuery, http.StatusFound)
- 	})
- 
- 	router.Get("/classic/alerts", readyf(h.alerts))
-@@ -366,11 +348,7 @@ func New(logger log.Logger, o *Options)
- 	router.Get("/classic/rules", readyf(h.rules))
- 	router.Get("/classic/targets", readyf(h.targets))
- 	router.Get("/classic/service-discovery", readyf(h.serviceDiscovery))
--	router.Get("/classic/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
--		r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath"))
--		fs := server.StaticFileServer(ui.Assets)
--		fs.ServeHTTP(w, r)
--	})
-+	router.Get("/classic/static/*filepath", route.FileServe(path.Join(o.LocalAssets, "/static")))
- 	// Make sure that "<path-prefix>/classic" is redirected to "<path-prefix>/classic/" and
- 	// not just the naked "/classic/", which would be the default behavior of the router
- 	// with the "RedirectTrailingSlash" option (https://pkg.go.dev/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash),
-@@ -390,48 +368,6 @@ func New(logger log.Logger, o *Options)
- 
- 	router.Get("/consoles/*filepath", readyf(h.consoles))
- 
--	serveReactApp := func(w http.ResponseWriter, r *http.Request) {
--		f, err := ui.Assets.Open("/static/react/index.html")
--		if err != nil {
--			w.WriteHeader(http.StatusInternalServerError)
--			fmt.Fprintf(w, "Error opening React index.html: %v", err)
--			return
--		}
--		defer func() { _ = f.Close() }()
--		idx, err := ioutil.ReadAll(f)
--		if err != nil {
--			w.WriteHeader(http.StatusInternalServerError)
--			fmt.Fprintf(w, "Error reading React index.html: %v", err)
--			return
--		}
--		replacedIdx := bytes.ReplaceAll(idx, []byte("CONSOLES_LINK_PLACEHOLDER"), []byte(h.consolesPath()))
--		replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("TITLE_PLACEHOLDER"), []byte(h.options.PageTitle))
--		w.Write(replacedIdx)
--	}
--
--	// Serve the React app.
--	for _, p := range reactRouterPaths {
--		router.Get(p, serveReactApp)
--	}
--
--	// The favicon and manifest are bundled as part of the React app, but we want to serve
--	// them on the root.
--	for _, p := range []string{"/favicon.ico", "/manifest.json"} {
--		assetPath := "/static/react" + p
--		router.Get(p, func(w http.ResponseWriter, r *http.Request) {
--			r.URL.Path = assetPath
--			fs := server.StaticFileServer(ui.Assets)
--			fs.ServeHTTP(w, r)
--		})
--	}
--
--	// Static files required by the React app.
--	router.Get("/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
--		r.URL.Path = path.Join("/static/react/static", route.Param(r.Context(), "filepath"))
--		fs := server.StaticFileServer(ui.Assets)
--		fs.ServeHTTP(w, r)
--	})
--
- 	if o.UserAssetsPath != "" {
- 		router.Get("/user/*filepath", route.FileServe(o.UserAssetsPath))
- 	}
-@@ -1076,7 +1012,7 @@ func (h *Handler) getTemplate(name strin
- 	var tmpl string
- 
- 	appendf := func(name string) error {
--		f, err := ui.Assets.Open(path.Join("/templates", name))
-+		f, err := os.Open(filepath.Join(h.options.LocalAssets, "templates", name))
- 		if err != nil {
- 			return err
- 		}
---- a/web/web_test.go
-+++ b/web/web_test.go
-@@ -139,6 +139,7 @@ func TestReadyAndHealthy(t *testing.T) {
- 		},
- 		Version:  &PrometheusVersion{},
- 		Gatherer: prometheus.DefaultGatherer,
-+		LocalAssets:    "../../../../../../web/ui",
- 	}
- 
- 	opts.Flags = map[string]string{}
---- a/web/ui/templates/_base.html
-+++ b/web/ui/templates/_base.html
-@@ -5,13 +5,13 @@
-         <meta name="robots" content="noindex,nofollow">
-         <title>{{ pageTitle }}</title>
-         <link rel="shortcut icon" href="{{ pathPrefix }}/classic/static/img/favicon.ico?v={{ buildVersion }}">
--        <script src="{{ pathPrefix }}/classic/static/vendor/js/jquery-3.5.1.min.js?v={{ buildVersion }}"></script>
--        <script src="{{ pathPrefix }}/classic/static/vendor/js/popper.min.js?v={{ buildVersion }}"></script>
--        <script src="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js?v={{ buildVersion }}"></script>
-+        <script src="{{ pathPrefix }}/classic/static/jquery/jquery.min.js?v={{ buildVersion }}"></script>
-+        <script src="{{ pathPrefix }}/classic/static/popper.js/popper.min.js?v={{ buildVersion }}"></script>
-+        <script src="{{ pathPrefix }}/classic/static/bootstrap4/js/bootstrap.min.js?v={{ buildVersion }}"></script>
- 
--        <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css?v={{ buildVersion }}">
-+        <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/bootstrap4/css/bootstrap.min.css?v={{ buildVersion }}">
-         <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/css/prometheus.css?v={{ buildVersion }}">
--        <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css?v={{ buildVersion }}">
-+        <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css?v={{ buildVersion }}">
- 
-         <script>
-             var PATH_PREFIX = "{{ pathPrefix }}";
-@@ -61,7 +61,6 @@
-                         <li class= "nav-item">
-                             <a class ="nav-link" href="https://prometheus.io/docs/prometheus/latest/getting_started/" target="_blank">Help</a>
-                         </li>
--                        <li class="nav-item"><a class="nav-link" href="{{ pathPrefix }}/graph">New UI</a></li>
-                     </ul>
-                 </div>
-             </div>
---- a/web/ui/doc.go
-+++ b/web/ui/doc.go
-@@ -11,6 +11,8 @@
- // See the License for the specific language governing permissions and
- // limitations under the License.
- 
-+// +build ignore
-+
- // Package ui provides the assets via a virtual filesystem.
- package ui
- 
---- a/web/ui/ui.go
-+++ b/web/ui/ui.go
-@@ -11,8 +11,7 @@
- // See the License for the specific language governing permissions and
- // limitations under the License.
- 
--//go:build !builtinassets
--// +build !builtinassets
-+// +build ignore
- 
- package ui
- 
diff -pruN 2.31.2+ds1-1/debian/patches/02-Default_settings.patch 2.33.5+ds1-2/debian/patches/02-Default_settings.patch
--- 2.31.2+ds1-1/debian/patches/02-Default_settings.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/02-Default_settings.patch	2022-03-23 22:28:20.000000000 +0000
@@ -2,13 +2,15 @@ From: Martina Ferrari <tina@debian.org>
 Date: Sat, 20 Jun 2020 15:14:34 -0300
 Subject: Add default settings adapted for Debian.
 Forwarded: not-needed
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
+
 ---
  cmd/prometheus/main.go |   10 +++++-----
  1 file changed, 5 insertions(+), 5 deletions(-)
 
 --- a/cmd/prometheus/main.go
 +++ b/cmd/prometheus/main.go
-@@ -201,7 +201,7 @@ func main() {
+@@ -226,7 +226,7 @@
  	a.HelpFlag.Short('h')
  
  	a.Flag("config.file", "Prometheus configuration file path.").
@@ -17,7 +19,7 @@ Forwarded: not-needed
  
  	a.Flag("web.listen-address", "Address to listen on for UI, API, and telemetry.").
  		Default("0.0.0.0:9090").StringVar(&cfg.web.ListenAddress)
-@@ -223,7 +223,7 @@ func main() {
+@@ -248,7 +248,7 @@
  		"Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url.").
  		PlaceHolder("<path>").StringVar(&cfg.web.RoutePrefix)
  
@@ -26,8 +28,8 @@ Forwarded: not-needed
  		PlaceHolder("<path>").StringVar(&cfg.web.UserAssetsPath)
  
  	a.Flag("web.enable-lifecycle", "Enable shutdown and reload via HTTP request.").
-@@ -233,10 +233,10 @@ func main() {
- 		Default("false").BoolVar(&cfg.web.EnableAdminAPI)
+@@ -261,10 +261,10 @@
+ 		Default("false").BoolVar(&cfg.web.EnableRemoteWriteReceiver)
  
  	a.Flag("web.console.templates", "Path to the console template directory, available at /consoles.").
 -		Default("consoles").StringVar(&cfg.web.ConsoleTemplatesPath)
@@ -39,12 +41,21 @@ Forwarded: not-needed
  
  	a.Flag("web.page-title", "Document title of Prometheus instance.").
  		Default("Prometheus Time Series Collection and Processing Server").StringVar(&cfg.web.PageTitle)
-@@ -245,7 +245,7 @@ func main() {
+@@ -273,7 +273,7 @@
  		Default(".*").StringVar(&cfg.corsRegexString)
  
- 	a.Flag("storage.tsdb.path", "Base path for metrics storage.").
--		Default("data/").StringVar(&cfg.localStoragePath)
-+		Default("/var/lib/prometheus/metrics2/").StringVar(&cfg.localStoragePath)
+ 	serverOnlyFlag(a, "storage.tsdb.path", "Base path for metrics storage.").
+-		Default("data/").StringVar(&cfg.serverStoragePath)
++		Default("/var/lib/prometheus/metrics2/").StringVar(&cfg.serverStoragePath)
  
- 	a.Flag("storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
+ 	serverOnlyFlag(a, "storage.tsdb.min-block-duration", "Minimum duration of a data block before being persisted. For use in testing.").
  		Hidden().Default("2h").SetValue(&cfg.tsdb.MinBlockDuration)
+@@ -309,7 +309,7 @@
+ 		Hidden().Default("true").BoolVar(&cfg.tsdb.WALCompression)
+ 
+ 	agentOnlyFlag(a, "storage.agent.path", "Base path for metrics storage.").
+-		Default("data-agent/").StringVar(&cfg.agentStoragePath)
++		Default("/var/lib/prometheus/data-agent/").StringVar(&cfg.agentStoragePath)
+ 
+ 	agentOnlyFlag(a, "storage.agent.wal-segment-size",
+ 		"Size at which to split WAL segment files. Example: 100MB").
diff -pruN 2.31.2+ds1-1/debian/patches/03-Disable_kubernetes.patch 2.33.5+ds1-2/debian/patches/03-Disable_kubernetes.patch
--- 2.31.2+ds1-1/debian/patches/03-Disable_kubernetes.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/03-Disable_kubernetes.patch	2022-03-23 22:28:20.000000000 +0000
@@ -1,6 +1,8 @@
 From: Martina Ferrari <tina@debian.org>
 Date: Sat, 20 Jun 2020 15:49:53 -0300
 Subject: Disable kubernetes
+Forwarded: not-needed
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 
 Disable kubernetes SD, until dependencies are more reasonable.
 
@@ -31,7 +33,7 @@ Disable kubernetes SD, until dependencie
 
 --- a/cmd/prometheus/main.go
 +++ b/cmd/prometheus/main.go
-@@ -52,8 +52,6 @@ import (
+@@ -52,8 +52,6 @@
  	jprom "github.com/uber/jaeger-lib/metrics/prometheus"
  	"go.uber.org/atomic"
  	kingpin "gopkg.in/alecthomas/kingpin.v2"
@@ -40,7 +42,7 @@ Disable kubernetes SD, until dependencie
  
  	"github.com/prometheus/prometheus/config"
  	"github.com/prometheus/prometheus/discovery"
-@@ -435,12 +433,6 @@ func main() {
+@@ -503,12 +501,6 @@
  	noStepSubqueryInterval := &safePromQLNoStepSubqueryInterval{}
  	noStepSubqueryInterval.Set(config.DefaultGlobalConfig.EvaluationInterval)
  
@@ -55,28 +57,28 @@ Disable kubernetes SD, until dependencie
  		level.Warn(logger).Log("msg", "This Prometheus binary has not been compiled for a 64-bit architecture. Due to virtual memory constraints of 32-bit systems, it is highly recommended to switch to a 64-bit binary of Prometheus.", "GOARCH", runtime.GOARCH)
 --- a/cmd/promtool/main.go
 +++ b/cmd/promtool/main.go
-@@ -46,7 +46,6 @@ import (
- 	"github.com/prometheus/prometheus/config"
+@@ -52,7 +52,6 @@
+ 	"github.com/prometheus/prometheus/discovery"
  	"github.com/prometheus/prometheus/discovery/file"
  	_ "github.com/prometheus/prometheus/discovery/install" // Register service discovery implementations.
 -	"github.com/prometheus/prometheus/discovery/kubernetes"
  	"github.com/prometheus/prometheus/discovery/targetgroup"
- 	"github.com/prometheus/prometheus/pkg/labels"
- 	"github.com/prometheus/prometheus/pkg/rulefmt"
-@@ -351,10 +350,6 @@ func checkConfig(filename string) ([]str
+ 	"github.com/prometheus/prometheus/model/labels"
+ 	"github.com/prometheus/prometheus/model/rulefmt"
+@@ -387,10 +386,6 @@
  
  		for _, c := range scfg.ServiceDiscoveryConfigs {
  			switch c := c.(type) {
 -			case *kubernetes.SDConfig:
--				if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig); err != nil {
+-				if err := checkTLSConfig(c.HTTPClientConfig.TLSConfig, checkSyntaxOnly); err != nil {
 -					return nil, err
 -				}
  			case *file.SDConfig:
- 				for _, file := range c.Files {
- 					files, err := filepath.Glob(file)
+ 				if checkSyntaxOnly {
+ 					break
 --- a/config/config_test.go
 +++ b/config/config_test.go
-@@ -40,7 +40,6 @@ import (
+@@ -40,7 +40,6 @@
  	"github.com/prometheus/prometheus/discovery/file"
  	"github.com/prometheus/prometheus/discovery/hetzner"
  	"github.com/prometheus/prometheus/discovery/http"
@@ -84,7 +86,7 @@ Disable kubernetes SD, until dependencie
  	"github.com/prometheus/prometheus/discovery/linode"
  	"github.com/prometheus/prometheus/discovery/marathon"
  	"github.com/prometheus/prometheus/discovery/moby"
-@@ -387,66 +386,6 @@ var expectedConf = &Config{
+@@ -386,66 +385,6 @@
  			},
  		},
  		{
@@ -151,7 +153,7 @@ Disable kubernetes SD, until dependencie
  			JobName: "service-kuma",
  
  			HonorTimestamps: true,
-@@ -1044,7 +983,7 @@ func TestElideSecrets(t *testing.T) {
+@@ -1046,7 +985,7 @@
  	yamlConfig := string(config)
  
  	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
@@ -160,90 +162,112 @@ Disable kubernetes SD, until dependencie
  	require.NotContains(t, yamlConfig, "mysecret",
  		"yaml marshal reveals authentication credentials.")
  }
-@@ -1056,29 +995,6 @@ func TestLoadConfigRuleFilesAbsolutePath
+@@ -1058,29 +997,6 @@
  	require.Equal(t, ruleFilesExpectedConf, c)
  }
  
 -func TestKubernetesEmptyAPIServer(t *testing.T) {
--	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, log.NewNopLogger())
+-	_, err := LoadFile("testdata/kubernetes_empty_apiserver.good.yml", false, false, log.NewNopLogger())
 -	require.NoError(t, err)
 -}
 -
 -func TestKubernetesWithKubeConfig(t *testing.T) {
--	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, log.NewNopLogger())
+-	_, err := LoadFile("testdata/kubernetes_kubeconfig_without_apiserver.good.yml", false, false, log.NewNopLogger())
 -	require.NoError(t, err)
 -}
 -
 -func TestKubernetesSelectors(t *testing.T) {
--	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, log.NewNopLogger())
+-	_, err := LoadFile("testdata/kubernetes_selectors_endpoints.good.yml", false, false, log.NewNopLogger())
 -	require.NoError(t, err)
--	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, log.NewNopLogger())
+-	_, err = LoadFile("testdata/kubernetes_selectors_node.good.yml", false, false, log.NewNopLogger())
 -	require.NoError(t, err)
--	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, log.NewNopLogger())
+-	_, err = LoadFile("testdata/kubernetes_selectors_ingress.good.yml", false, false, log.NewNopLogger())
 -	require.NoError(t, err)
--	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, log.NewNopLogger())
+-	_, err = LoadFile("testdata/kubernetes_selectors_pod.good.yml", false, false, log.NewNopLogger())
 -	require.NoError(t, err)
--	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, log.NewNopLogger())
+-	_, err = LoadFile("testdata/kubernetes_selectors_service.good.yml", false, false, log.NewNopLogger())
 -	require.NoError(t, err)
 -}
 -
  var expectedErrors = []struct {
  	filename string
  	errMsg   string
-@@ -1153,52 +1069,6 @@ var expectedErrors = []struct {
- 		filename: "bearertoken_basicauth.bad.yml",
+@@ -1178,74 +1094,6 @@
  		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
- 	}, {
+ 	},
+ 	{
 -		filename: "kubernetes_http_config_without_api_server.bad.yml",
 -		errMsg:   "to use custom HTTP client configuration please provide the 'api_server' URL explicitly",
--	}, {
+-	},
+-	{
+-		filename: "kubernetes_kubeconfig_with_own_namespace.bad.yml",
+-		errMsg:   "cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously",
+-	},
+-	{
+-		filename: "kubernetes_api_server_with_own_namespace.bad.yml",
+-		errMsg:   "cannot use 'api_server' and 'namespaces.own_namespace' simultaneously",
+-	},
+-	{
 -		filename: "kubernetes_kubeconfig_with_apiserver.bad.yml",
 -		errMsg:   "cannot use 'kubeconfig_file' and 'api_server' simultaneously",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_kubeconfig_with_http_config.bad.yml",
 -		errMsg:   "cannot use a custom HTTP client configuration together with 'kubeconfig_file'",
 -	},
 -	{
 -		filename: "kubernetes_bearertoken.bad.yml",
 -		errMsg:   "at most one of bearer_token & bearer_token_file must be configured",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_role.bad.yml",
 -		errMsg:   "role",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_selectors_endpoints.bad.yml",
 -		errMsg:   "endpoints role supports only pod, service, endpoints selectors",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_selectors_ingress.bad.yml",
 -		errMsg:   "ingress role supports only ingress selectors",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_selectors_node.bad.yml",
 -		errMsg:   "node role supports only node selectors",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_selectors_pod.bad.yml",
 -		errMsg:   "pod role supports only pod selectors",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_selectors_service.bad.yml",
 -		errMsg:   "service role supports only service selectors",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_namespace_discovery.bad.yml",
 -		errMsg:   "field foo not found in type kubernetes.plain",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_selectors_duplicated_role.bad.yml",
 -		errMsg:   "duplicated selector role: pod",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_selectors_incorrect_selector.bad.yml",
 -		errMsg:   "invalid selector: 'metadata.status-Running'; can't understand 'metadata.status-Running'",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_bearertoken_basicauth.bad.yml",
 -		errMsg:   "at most one of basic_auth, oauth2, bearer_token & bearer_token_file must be configured",
--	}, {
+-	},
+-	{
 -		filename: "kubernetes_authorization_basicauth.bad.yml",
 -		errMsg:   "at most one of basic_auth, oauth2 & authorization must be configured",
--	}, {
+-	},
+-	{
  		filename: "marathon_no_servers.bad.yml",
  		errMsg:   "marathon_sd: must contain at least one Marathon server",
- 	}, {
-@@ -1441,8 +1311,3 @@ func TestEmptyGlobalBlock(t *testing.T)
+ 	},
+@@ -1509,8 +1357,3 @@
  	exp := DefaultConfig
  	require.Equal(t, exp, *c)
  }
@@ -254,7 +278,7 @@ Disable kubernetes SD, until dependencie
 -}
 --- a/config/testdata/conf.good.yml
 +++ b/config/testdata/conf.good.yml
-@@ -169,32 +169,6 @@ scrape_configs:
+@@ -169,32 +169,6 @@
      authorization:
        credentials: mysecret
  
@@ -289,7 +313,7 @@ Disable kubernetes SD, until dependencie
      kuma_sd_configs:
 --- a/config/testdata/roundtrip.good.yml
 +++ b/config/testdata/roundtrip.good.yml
-@@ -88,24 +88,6 @@ scrape_configs:
+@@ -88,24 +88,6 @@
        - files:
            - single/file.yml
  
@@ -316,7 +340,7 @@ Disable kubernetes SD, until dependencie
  
 --- a/discovery/install/install.go
 +++ b/discovery/install/install.go
-@@ -26,7 +26,6 @@ import (
+@@ -26,7 +26,6 @@
  	_ "github.com/prometheus/prometheus/discovery/gce"          // register gce
  	_ "github.com/prometheus/prometheus/discovery/hetzner"      // register hetzner
  	_ "github.com/prometheus/prometheus/discovery/http"         // register http
diff -pruN 2.31.2+ds1-1/debian/patches/05-Fix-test-prom-invocations.patch 2.33.5+ds1-2/debian/patches/05-Fix-test-prom-invocations.patch
--- 2.31.2+ds1-1/debian/patches/05-Fix-test-prom-invocations.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/05-Fix-test-prom-invocations.patch	2022-03-23 22:28:20.000000000 +0000
@@ -1,8 +1,10 @@
 From: Martina Ferrari <tina@debian.org>
 Date: Fri, 5 Jun 2020 15:36:25 -0300
 Subject: Fix prometheus invocations, paths for debian build system, and
+ avoid recompiling prometheus.
+Forwarded: not-needed
+Last-Updated: Wed, 16 Mar 2022 05:48:22 +0000
 
-avoid recompiling prometheus.
 ---
  cmd/prometheus/main_test.go      |    8 +--
  cmd/prometheus/main_unix_test.go |    4 -
@@ -11,7 +13,7 @@ avoid recompiling prometheus.
 
 --- a/cmd/prometheus/main_test.go
 +++ b/cmd/prometheus/main_test.go
-@@ -96,7 +96,7 @@ func TestComputeExternalURL(t *testing.T
+@@ -99,7 +99,7 @@
  	}
  
  	for _, test := range tests {
@@ -20,7 +22,7 @@ avoid recompiling prometheus.
  		if test.valid {
  			require.NoError(t, err)
  		} else {
-@@ -153,7 +153,7 @@ func TestSendAlerts(t *testing.T) {
+@@ -156,7 +156,7 @@
  					Annotations:  []labels.Label{{Name: "a2", Value: "v2"}},
  					StartsAt:     time.Unix(2, 0),
  					EndsAt:       time.Unix(3, 0),
@@ -29,7 +31,7 @@ avoid recompiling prometheus.
  				},
  			},
  		},
-@@ -173,7 +173,7 @@ func TestSendAlerts(t *testing.T) {
+@@ -176,7 +176,7 @@
  					Annotations:  []labels.Label{{Name: "a2", Value: "v2"}},
  					StartsAt:     time.Unix(2, 0),
  					EndsAt:       time.Unix(4, 0),
@@ -38,7 +40,7 @@ avoid recompiling prometheus.
  				},
  			},
  		},
-@@ -191,7 +191,7 @@ func TestSendAlerts(t *testing.T) {
+@@ -194,7 +194,7 @@
  				}
  				require.Equal(t, tc.exp, alerts)
  			})
@@ -47,29 +49,45 @@ avoid recompiling prometheus.
  		})
  	}
  }
---- a/cmd/prometheus/main_unix_test.go
-+++ b/cmd/prometheus/main_unix_test.go
-@@ -31,7 +31,7 @@ func TestStartupInterrupt(t *testing.T)
- 		t.Skip("skipping test in short mode.")
- 	}
+@@ -351,7 +351,7 @@
+ }
+ 
+ func TestAgentSuccessfulStartup(t *testing.T) {
+-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig)
++	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+agentConfig, "--web.listen-address=0.0.0.0:0", "--storage.agent.path="+filepath.Join(t.TempDir(), "data"))
+ 	require.NoError(t, prom.Start())
  
--	prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+promData)
-+	prom := exec.Command(promPath, "-test.main", "--config.file="+promConfig, "--storage.tsdb.path="+promData, "--web.listen-address=:9099")
- 	err := prom.Start()
- 	if err != nil {
- 		t.Errorf("execution error: %v", err)
-@@ -50,7 +50,7 @@ Loop:
- 	for x := 0; x < 10; x++ {
- 		// error=nil means prometheus has started so we can send the interrupt
- 		// signal and wait for the graceful shutdown.
--		if _, err := http.Get("http://localhost:9090/graph"); err == nil {
-+		if _, err := http.Get("http://localhost:9099/graph"); err == nil {
- 			startedOk = true
- 			prom.Process.Signal(os.Interrupt)
- 			select {
+ 	actualExitStatus := 0
+@@ -369,7 +369,7 @@
+ }
+ 
+ func TestAgentFailedStartupWithServerFlag(t *testing.T) {
+-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig)
++	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--storage.tsdb.path=.", "--config.file="+promConfig, "--web.listen-address=0.0.0.0:0")
+ 
+ 	output := bytes.Buffer{}
+ 	prom.Stderr = &output
+@@ -396,7 +396,7 @@
+ }
+ 
+ func TestAgentFailedStartupWithInvalidConfig(t *testing.T) {
+-	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig)
++	prom := exec.Command(promPath, "-test.main", "--enable-feature=agent", "--config.file="+promConfig, "--web.listen-address=0.0.0.0:0")
+ 	require.NoError(t, prom.Start())
+ 
+ 	actualExitStatus := 0
+@@ -431,7 +431,7 @@
+ 
+ 	for _, tc := range testcases {
+ 		t.Run(fmt.Sprintf("%s mode with option %s", tc.mode, tc.arg), func(t *testing.T) {
+-			args := []string{"-test.main", tc.arg, t.TempDir()}
++			args := []string{"-test.main", tc.arg, t.TempDir(), "--web.listen-address=0.0.0.0:0"}
+ 
+ 			if tc.mode == "agent" {
+ 				args = append(args, "--enable-feature=agent", "--config.file="+agentConfig)
 --- a/web/web_test.go
 +++ b/web/web_test.go
-@@ -50,7 +50,7 @@ func TestMain(m *testing.M) {
+@@ -51,7 +51,7 @@
  
  func TestGlobalURL(t *testing.T) {
  	opts := &Options{
@@ -78,7 +96,7 @@ avoid recompiling prometheus.
  		ExternalURL: &url.URL{
  			Scheme: "https",
  			Host:   "externalhost:80",
-@@ -64,12 +64,12 @@ func TestGlobalURL(t *testing.T) {
+@@ -65,12 +65,12 @@
  	}{
  		{
  			// Nothing should change if the input URL is not on localhost, even if the port is our listening port.
@@ -94,7 +112,7 @@ avoid recompiling prometheus.
  			outURL: "https://externalhost:80/metrics",
  		},
  		{
-@@ -79,7 +79,7 @@ func TestGlobalURL(t *testing.T) {
+@@ -80,7 +80,7 @@
  		},
  		{
  			// Alternative localhost representations should also work.
@@ -103,120 +121,7 @@ avoid recompiling prometheus.
  			outURL: "https://externalhost:80/metrics",
  		},
  	}
-@@ -119,7 +119,7 @@ func TestReadyAndHealthy(t *testing.T) {
- 	require.NoError(t, err)
- 
- 	opts := &Options{
--		ListenAddress:  ":9090",
-+		ListenAddress:  ":9098",
- 		ReadTimeout:    30 * time.Second,
- 		MaxConnections: 512,
- 		Context:        nil,
-@@ -134,7 +134,7 @@ func TestReadyAndHealthy(t *testing.T) {
- 		EnableAdminAPI: true,
- 		ExternalURL: &url.URL{
- 			Scheme: "http",
--			Host:   "localhost:9090",
-+			Host:   "localhost:9098",
- 			Path:   "/",
- 		},
- 		Version:  &PrometheusVersion{},
-@@ -165,20 +165,20 @@ func TestReadyAndHealthy(t *testing.T) {
- 	// to be up before starting tests.
- 	time.Sleep(5 * time.Second)
- 
--	resp, err := http.Get("http://localhost:9090/-/healthy")
-+	resp, err := http.Get("http://localhost:9098/-/healthy")
- 	require.NoError(t, err)
- 	require.Equal(t, http.StatusOK, resp.StatusCode)
- 	cleanupTestResponse(t, resp)
- 
- 	for _, u := range []string{
--		"http://localhost:9090/-/ready",
--		"http://localhost:9090/classic/graph",
--		"http://localhost:9090/classic/flags",
--		"http://localhost:9090/classic/rules",
--		"http://localhost:9090/classic/service-discovery",
--		"http://localhost:9090/classic/targets",
--		"http://localhost:9090/classic/status",
--		"http://localhost:9090/classic/config",
-+		"http://localhost:9098/-/ready",
-+		"http://localhost:9098/classic/graph",
-+		"http://localhost:9098/classic/flags",
-+		"http://localhost:9098/classic/rules",
-+		"http://localhost:9098/classic/service-discovery",
-+		"http://localhost:9098/classic/targets",
-+		"http://localhost:9098/classic/status",
-+		"http://localhost:9098/classic/config",
- 	} {
- 		resp, err = http.Get(u)
- 		require.NoError(t, err)
-@@ -186,12 +186,12 @@ func TestReadyAndHealthy(t *testing.T) {
- 		cleanupTestResponse(t, resp)
- 	}
- 
--	resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
-+	resp, err = http.Post("http://localhost:9098/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
- 	require.NoError(t, err)
- 	require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
- 	cleanupTestResponse(t, resp)
- 
--	resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/delete_series", "", strings.NewReader("{}"))
-+	resp, err = http.Post("http://localhost:9098/api/v1/admin/tsdb/delete_series", "", strings.NewReader("{}"))
- 	require.NoError(t, err)
- 	require.Equal(t, http.StatusServiceUnavailable, resp.StatusCode)
- 	cleanupTestResponse(t, resp)
-@@ -200,15 +200,15 @@ func TestReadyAndHealthy(t *testing.T) {
- 	webHandler.Ready()
- 
- 	for _, u := range []string{
--		"http://localhost:9090/-/healthy",
--		"http://localhost:9090/-/ready",
--		"http://localhost:9090/classic/graph",
--		"http://localhost:9090/classic/flags",
--		"http://localhost:9090/classic/rules",
--		"http://localhost:9090/classic/service-discovery",
--		"http://localhost:9090/classic/targets",
--		"http://localhost:9090/classic/status",
--		"http://localhost:9090/classic/config",
-+		"http://localhost:9098/-/healthy",
-+		"http://localhost:9098/-/ready",
-+		"http://localhost:9098/classic/graph",
-+		"http://localhost:9098/classic/flags",
-+		"http://localhost:9098/classic/rules",
-+		"http://localhost:9098/classic/service-discovery",
-+		"http://localhost:9098/classic/targets",
-+		"http://localhost:9098/classic/status",
-+		"http://localhost:9098/classic/config",
- 	} {
- 		resp, err = http.Get(u)
- 		require.NoError(t, err)
-@@ -216,13 +216,13 @@ func TestReadyAndHealthy(t *testing.T) {
- 		cleanupTestResponse(t, resp)
- 	}
- 
--	resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
-+	resp, err = http.Post("http://localhost:9098/api/v1/admin/tsdb/snapshot", "", strings.NewReader(""))
- 	require.NoError(t, err)
- 	require.Equal(t, http.StatusOK, resp.StatusCode)
- 	cleanupSnapshot(t, dbDir, resp)
- 	cleanupTestResponse(t, resp)
- 
--	resp, err = http.Post("http://localhost:9090/api/v1/admin/tsdb/delete_series?match[]=up", "", nil)
-+	resp, err = http.Post("http://localhost:9098/api/v1/admin/tsdb/delete_series?match[]=up", "", nil)
- 	require.NoError(t, err)
- 	require.Equal(t, http.StatusNoContent, resp.StatusCode)
- 	cleanupTestResponse(t, resp)
-@@ -252,7 +252,7 @@ func TestRoutePrefix(t *testing.T) {
- 		RoutePrefix:    "/prometheus",
- 		EnableAdminAPI: true,
- 		ExternalURL: &url.URL{
--			Host:   "localhost.localdomain:9090",
-+			Host:   "localhost.localdomain:9098",
- 			Scheme: "http",
- 		},
- 	}
-@@ -338,9 +338,9 @@ func TestDebugHandler(t *testing.T) {
+@@ -348,9 +348,9 @@
  	} {
  		opts := &Options{
  			RoutePrefix:   tc.prefix,
@@ -228,7 +133,7 @@ avoid recompiling prometheus.
  				Scheme: "http",
  			},
  		}
-@@ -363,9 +363,9 @@ func TestHTTPMetrics(t *testing.T) {
+@@ -373,9 +373,9 @@
  	t.Parallel()
  	handler := New(nil, &Options{
  		RoutePrefix:   "/",
@@ -240,55 +145,3 @@ avoid recompiling prometheus.
  			Scheme: "http",
  		},
  	})
-@@ -405,7 +405,7 @@ func TestShutdownWithStaleConnection(t *
- 	timeout := 10 * time.Second
- 
- 	opts := &Options{
--		ListenAddress:  ":9090",
-+		ListenAddress:  ":9098",
- 		ReadTimeout:    timeout,
- 		MaxConnections: 512,
- 		Context:        nil,
-@@ -419,7 +419,7 @@ func TestShutdownWithStaleConnection(t *
- 		RoutePrefix:    "/",
- 		ExternalURL: &url.URL{
- 			Scheme: "http",
--			Host:   "localhost:9090",
-+			Host:   "localhost:9098",
- 			Path:   "/",
- 		},
- 		Version:  &PrometheusVersion{},
-@@ -454,7 +454,7 @@ func TestShutdownWithStaleConnection(t *
- 
- 	// Open a socket, and don't use it. This connection should then be closed
- 	// after the ReadTimeout.
--	c, err := net.Dial("tcp", "localhost:9090")
-+	c, err := net.Dial("tcp", "localhost:9098")
- 	require.NoError(t, err)
- 	t.Cleanup(func() { require.NoError(t, c.Close()) })
- 
-@@ -470,13 +470,13 @@ func TestShutdownWithStaleConnection(t *
- 
- func TestHandleMultipleQuitRequests(t *testing.T) {
- 	opts := &Options{
--		ListenAddress:   ":9090",
-+		ListenAddress:   ":9098",
- 		MaxConnections:  512,
- 		EnableLifecycle: true,
- 		RoutePrefix:     "/",
- 		ExternalURL: &url.URL{
- 			Scheme: "http",
--			Host:   "localhost:9090",
-+			Host:   "localhost:9098",
- 			Path:   "/",
- 		},
- 	}
-@@ -508,7 +508,7 @@ func TestHandleMultipleQuitRequests(t *t
- 		go func() {
- 			defer wg.Done()
- 			<-start
--			resp, err := http.Post("http://localhost:9090/-/quit", "", strings.NewReader(""))
-+			resp, err := http.Post("http://localhost:9098/-/quit", "", strings.NewReader(""))
- 			require.NoError(t, err)
- 			require.Equal(t, http.StatusOK, resp.StatusCode)
- 		}()
diff -pruN 2.31.2+ds1-1/debian/patches/06-Disable_TSDB_lockfile.patch 2.33.5+ds1-2/debian/patches/06-Disable_TSDB_lockfile.patch
--- 2.31.2+ds1-1/debian/patches/06-Disable_TSDB_lockfile.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/06-Disable_TSDB_lockfile.patch	2022-03-23 22:28:20.000000000 +0000
@@ -1,6 +1,8 @@
 From: Martina Ferrari <tina@debian.org>
 Date: Sat, 20 Jun 2020 16:03:13 -0300
 Subject: Disable TSDB lockfile
+Forwarded: not-needed
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 
 Stop creating a tsdb lockfile by default. Replace
 storage.tsdb.no-lockfile flag with storage.tsdb.use-lockfile, so the
@@ -14,18 +16,29 @@ request the feature.
 
 --- a/cmd/prometheus/main.go
 +++ b/cmd/prometheus/main.go
-@@ -269,8 +269,8 @@ func main() {
- 	a.Flag("storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
+@@ -297,8 +297,8 @@
+ 	serverOnlyFlag(a, "storage.tsdb.retention.size", "Maximum number of bytes that can be stored for blocks. A unit is required, supported units: B, KB, MB, GB, TB, PB, EB. Ex: \"512MB\".").
  		BytesVar(&cfg.tsdb.MaxBytes)
  
--	a.Flag("storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
+-	serverOnlyFlag(a, "storage.tsdb.no-lockfile", "Do not create lockfile in data directory.").
 -		Default("false").BoolVar(&cfg.tsdb.NoLockfile)
-+	a.Flag("storage.tsdb.use-lockfile", "Create a lockfile in data directory.").
++	serverOnlyFlag(a, "storage.tsdb.use-lockfile", "Create a lockfile in data directory.").
 +		Default("false").BoolVar(&cfg.tsdb.UseLockfile)
  
- 	a.Flag("storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
+ 	serverOnlyFlag(a, "storage.tsdb.allow-overlapping-blocks", "Allow overlapping blocks, which in turn enables vertical compaction and vertical query merge.").
  		Default("false").BoolVar(&cfg.tsdb.AllowOverlappingBlocks)
-@@ -863,7 +863,7 @@ func main() {
+@@ -328,8 +328,8 @@
+ 		"Maximum age samples may be before being forcibly deleted when the WAL is truncated").
+ 		SetValue(&cfg.agent.MaxWALTime)
+ 
+-	agentOnlyFlag(a, "storage.agent.no-lockfile", "Do not create lockfile in data directory.").
+-		Default("false").BoolVar(&cfg.agent.NoLockfile)
++	agentOnlyFlag(a, "storage.agent.use-lockfile", "Create a lockfile in data directory.").
++		Default("false").BoolVar(&cfg.agent.UseLockfile)
+ 
+ 	a.Flag("storage.remote.flush-deadline", "How long to wait flushing sample on shutdown or config reload.").
+ 		Default("1m").PlaceHolder("<duration>").SetValue(&cfg.RemoteFlushDeadline)
+@@ -941,7 +941,7 @@
  					"MinBlockDuration", cfg.tsdb.MinBlockDuration,
  					"MaxBlockDuration", cfg.tsdb.MaxBlockDuration,
  					"MaxBytes", cfg.tsdb.MaxBytes,
@@ -34,7 +47,7 @@ request the feature.
  					"RetentionDuration", cfg.tsdb.RetentionDuration,
  					"WALSegmentSize", cfg.tsdb.WALSegmentSize,
  					"AllowOverlappingBlocks", cfg.tsdb.AllowOverlappingBlocks,
-@@ -1286,7 +1286,7 @@ type tsdbOptions struct {
+@@ -1462,7 +1462,7 @@
  	MaxBlockChunkSegmentSize       units.Base2Bytes
  	RetentionDuration              model.Duration
  	MaxBytes                       units.Base2Bytes
@@ -43,7 +56,7 @@ request the feature.
  	AllowOverlappingBlocks         bool
  	WALCompression                 bool
  	StripeSize                     int
-@@ -1303,7 +1303,7 @@ func (opts tsdbOptions) ToTSDBOptions()
+@@ -1479,7 +1479,7 @@
  		MaxBlockChunkSegmentSize:       int64(opts.MaxBlockChunkSegmentSize),
  		RetentionDuration:              int64(time.Duration(opts.RetentionDuration) / time.Millisecond),
  		MaxBytes:                       int64(opts.MaxBytes),
@@ -52,9 +65,27 @@ request the feature.
  		AllowOverlappingBlocks:         opts.AllowOverlappingBlocks,
  		WALCompression:                 opts.WALCompression,
  		StripeSize:                     opts.StripeSize,
+@@ -1499,7 +1499,7 @@
+ 	StripeSize             int
+ 	TruncateFrequency      model.Duration
+ 	MinWALTime, MaxWALTime model.Duration
+-	NoLockfile             bool
++	UseLockfile            bool
+ }
+ 
+ func (opts agentOptions) ToAgentOptions() agent.Options {
+@@ -1510,7 +1510,7 @@
+ 		TruncateFrequency: time.Duration(opts.TruncateFrequency),
+ 		MinWALTime:        durationToInt64Millis(time.Duration(opts.MinWALTime)),
+ 		MaxWALTime:        durationToInt64Millis(time.Duration(opts.MaxWALTime)),
+-		NoLockfile:        opts.NoLockfile,
++		UseLockfile:       opts.UseLockfile,
+ 	}
+ }
+ 
 --- a/tsdb/db.go
 +++ b/tsdb/db.go
-@@ -79,7 +79,7 @@ func DefaultOptions() *Options {
+@@ -74,7 +74,7 @@
  		RetentionDuration:         int64(15 * 24 * time.Hour / time.Millisecond),
  		MinBlockDuration:          DefaultBlockDuration,
  		MaxBlockDuration:          DefaultBlockDuration,
@@ -63,7 +94,7 @@ request the feature.
  		AllowOverlappingBlocks:    false,
  		WALCompression:            false,
  		StripeSize:                DefaultStripeSize,
-@@ -112,8 +112,8 @@ type Options struct {
+@@ -109,8 +109,8 @@
  	// the current size of the database.
  	MaxBytes int64
  
@@ -74,18 +105,18 @@ request the feature.
  
  	// Overlapping blocks are allowed if AllowOverlappingBlocks is true.
  	// This in-turn enables vertical compaction and vertical query merge.
-@@ -672,7 +672,7 @@ func open(dir string, l log.Logger, r pr
+@@ -678,7 +678,7 @@
+ 	if err != nil {
+ 		return nil, err
  	}
- 
- 	lockfileCreationStatus := lockfileDisabled
 -	if !opts.NoLockfile {
 +	if opts.UseLockfile {
- 		absdir, err := filepath.Abs(dir)
- 		if err != nil {
+ 		if err := db.locker.Lock(); err != nil {
  			return nil, err
+ 		}
 --- a/tsdb/db_test.go
 +++ b/tsdb/db_test.go
-@@ -2799,7 +2799,7 @@ func TestCompactHead(t *testing.T) {
+@@ -2902,7 +2902,7 @@
  	// Open a DB and append data to the WAL.
  	tsdbCfg := &Options{
  		RetentionDuration: int64(time.Hour * 24 * 15 / time.Millisecond),
@@ -94,7 +125,7 @@ request the feature.
  		MinBlockDuration:  int64(time.Hour * 2 / time.Millisecond),
  		MaxBlockDuration:  int64(time.Hour * 2 / time.Millisecond),
  		WALCompression:    true,
-@@ -2989,7 +2989,7 @@ func TestOneCheckpointPerCompactCall(t *
+@@ -3092,7 +3092,7 @@
  	blockRange := int64(1000)
  	tsdbCfg := &Options{
  		RetentionDuration: blockRange * 1000,
@@ -103,12 +134,54 @@ request the feature.
  		MinBlockDuration:  blockRange,
  		MaxBlockDuration:  blockRange,
  	}
-@@ -3170,7 +3170,7 @@ func TestLockfileMetric(t *testing.T) {
- 				require.NoError(t, err)
- 			}
- 			opts := DefaultOptions()
--			opts.NoLockfile = c.lockFileDisabled
-+			opts.UseLockfile = !c.lockFileDisabled
+@@ -3230,7 +3230,7 @@
+ func TestLockfile(t *testing.T) {
+ 	tsdbutil.TestDirLockerUsage(t, func(t *testing.T, data string, createLock bool) (*tsdbutil.DirLocker, testutil.Closer) {
+ 		opts := DefaultOptions()
+-		opts.NoLockfile = !createLock
++		opts.UseLockfile = createLock
+ 
+ 		// Create the DB. This should create lockfile and its metrics.
+ 		db, err := Open(data, nil, nil, opts, nil)
+--- a/tsdb/agent/db.go
++++ b/tsdb/agent/db.go
+@@ -71,8 +71,8 @@
+ 	// deleted.
+ 	MinWALTime, MaxWALTime int64
+ 
+-	// NoLockfile disables creation and consideration of a lock file.
+-	NoLockfile bool
++	// UseLockfile enables creation and consideration of a lock file.
++	UseLockfile bool
+ }
+ 
+ // DefaultOptions used for the WAL storage. They are sane for setups using
+@@ -85,7 +85,7 @@
+ 		TruncateFrequency: DefaultTruncateFrequency,
+ 		MinWALTime:        DefaultMinWALTime,
+ 		MaxWALTime:        DefaultMaxWALTime,
+-		NoLockfile:        false,
++		UseLockfile:       true,
+ 	}
+ }
+ 
+@@ -244,7 +244,7 @@
+ 	if err != nil {
+ 		return nil, err
+ 	}
+-	if !opts.NoLockfile {
++	if opts.UseLockfile {
+ 		if err := locker.Lock(); err != nil {
+ 			return nil, err
+ 		}
+--- a/tsdb/agent/db_test.go
++++ b/tsdb/agent/db_test.go
+@@ -397,7 +397,7 @@
+ 		})
+ 
+ 		opts := DefaultOptions()
+-		opts.NoLockfile = !createLock
++		opts.UseLockfile = createLock
  
- 			// Create the DB, this should create a lockfile and the metrics
- 			db, err := Open(tmpdir, nil, nil, opts, nil)
+ 		// Create the DB. This should create lockfile and its metrics.
+ 		db, err := Open(logger, nil, rs, data, opts)
diff -pruN 2.31.2+ds1-1/debian/patches/09-Fix_hanging_test.patch 2.33.5+ds1-2/debian/patches/09-Fix_hanging_test.patch
--- 2.31.2+ds1-1/debian/patches/09-Fix_hanging_test.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/09-Fix_hanging_test.patch	2022-03-23 22:28:20.000000000 +0000
@@ -3,13 +3,15 @@ Date: Sat, 20 Jun 2020 15:14:34 -0300
 Subject: Fix test failing due to gRPC server not finishing promptly: it
  times out at 20s, so let's wait 21.
 Forwarded: https://github.com/prometheus/prometheus/issues/4587
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
+
 ---
  cmd/prometheus/main_unix_test.go |    2 +-
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 --- a/cmd/prometheus/main_unix_test.go
 +++ b/cmd/prometheus/main_unix_test.go
-@@ -56,7 +56,7 @@ Loop:
+@@ -62,7 +62,7 @@
  			select {
  			case stoppedErr = <-done:
  				break Loop
diff -pruN 2.31.2+ds1-1/debian/patches/10-Add_agent_ui.patch 2.33.5+ds1-2/debian/patches/10-Add_agent_ui.patch
--- 2.31.2+ds1-1/debian/patches/10-Add_agent_ui.patch	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/10-Add_agent_ui.patch	2022-03-23 22:28:20.000000000 +0000
@@ -0,0 +1,62 @@
+From: Martina Ferrari <tina@debian.org>
+Date: Sat Mar 12 18:52:43 2022 +0000
+Subject: Backport agent mode page to classic UI.
+Forwarded: not-needed
+Last-Updated: Sat Mar 12 18:52:43 2022 +0000
+
+--- /dev/null
++++ b/web/ui/templates/agent.html
+@@ -0,0 +1,12 @@
++{{define "head"}}<!-- nix -->{{end}}
++
++{{define "content"}}
++  <div class="container-fluid">
++    <h2 id="agentmode">Prometheus Agent</h2>
++    <p>This Prometheus instance is running in <strong>agent mode</strong>. In
++    this mode, Prometheus is only used to scrape discovered targets and forward
++    the scraped metrics to remote write endpoints.</p>
++    <p>Some features are not available in this mode, such as querying and
++    alerting.</p>
++  </div>
++{{end}}
+--- a/web/web.go
++++ b/web/web.go
+@@ -375,6 +375,7 @@
+ 		http.Redirect(w, r, path.Join(o.ExternalURL.Path, p)+"?"+r.URL.RawQuery, http.StatusFound)
+ 	})
+ 
++	router.Get("/classic/agent", readyf(h.agent))
+ 	router.Get("/classic/alerts", readyf(h.alerts))
+ 	router.Get("/classic/graph", readyf(h.graph))
+ 	router.Get("/classic/status", readyf(h.status))
+@@ -632,6 +633,10 @@
+ 	}
+ }
+ 
++func (h *Handler) agent(w http.ResponseWriter, r *http.Request) {
++	h.executeTemplate(w, "agent.html", nil)
++}
++
+ func (h *Handler) alerts(w http.ResponseWriter, r *http.Request) {
+ 	var groups []*rules.Group
+ 	for _, group := range h.ruleManager.RuleGroups() {
+--- a/web/web_test.go
++++ b/web/web_test.go
+@@ -177,6 +177,8 @@
+ 
+ 	for _, u := range []string{
+ 		baseURL + "/-/ready",
++		baseURL + "/classic/agent",
++		baseURL + "/classic/alerts",
+ 		baseURL + "/classic/graph",
+ 		baseURL + "/classic/flags",
+ 		baseURL + "/classic/rules",
+@@ -207,6 +209,8 @@
+ 	for _, u := range []string{
+ 		baseURL + "/-/healthy",
+ 		baseURL + "/-/ready",
++		baseURL + "/classic/agent",
++		baseURL + "/classic/alerts",
+ 		baseURL + "/classic/graph",
+ 		baseURL + "/classic/flags",
+ 		baseURL + "/classic/rules",
diff -pruN 2.31.2+ds1-1/debian/patches/11-Disable_react_UI.patch 2.33.5+ds1-2/debian/patches/11-Disable_react_UI.patch
--- 2.31.2+ds1-1/debian/patches/11-Disable_react_UI.patch	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/11-Disable_react_UI.patch	2022-03-23 22:28:20.000000000 +0000
@@ -0,0 +1,144 @@
+From: Martina Ferrari <tina@debian.org>
+Date: Mon, 14 Mar 2022 23:20:08 +0000
+Subject: Disable the React UI
+Forwarded: not-needed
+Last-Updated: Mon, 14 Mar 2022 23:20:08 +0000
+
+--- a/web/web.go
++++ b/web/web.go
+@@ -14,7 +14,6 @@
+ package web
+ 
+ import (
+-	"bytes"
+ 	"context"
+ 	"encoding/json"
+ 	"fmt"
+@@ -32,7 +31,6 @@
+ 	"regexp"
+ 	"runtime"
+ 	"sort"
+-	"strconv"
+ 	"strings"
+ 	"sync"
+ 	template_text "text/template"
+@@ -69,29 +67,6 @@
+ 	"github.com/prometheus/prometheus/web/ui"
+ )
+ 
+-// Paths that are handled by the React / Reach router that should all be served the main React app's index.html.
+-var reactRouterPaths = []string{
+-	"/config",
+-	"/flags",
+-	"/service-discovery",
+-	"/status",
+-	"/targets",
+-	"/starting",
+-}
+-
+-// Paths that are handled by the React router when the Agent mode is set.
+-var reactRouterAgentPaths = []string{
+-	"/agent",
+-}
+-
+-// Paths that are handled by the React router when the Agent mode is not set.
+-var reactRouterServerPaths = []string{
+-	"/alerts",
+-	"/graph",
+-	"/rules",
+-	"/tsdb-status",
+-}
+-
+ // withStackTrace logs the stack trace in case the request panics. The function
+ // will re-raise the error which will then be handled by the net/http package.
+ // It is needed because the go-kit log package doesn't manage properly the
+@@ -355,9 +330,9 @@
+ 		router = router.WithPrefix(o.RoutePrefix)
+ 	}
+ 
+-	homePage := "/graph"
++	homePage := "/classic/graph"
+ 	if o.IsAgent {
+-		homePage = "/agent"
++		homePage = "/classic/agent"
+ 	}
+ 
+ 	readyf := h.testReady
+@@ -366,13 +341,12 @@
+ 		http.Redirect(w, r, path.Join(o.ExternalURL.Path, homePage), http.StatusFound)
+ 	})
+ 	router.Get("/classic/", func(w http.ResponseWriter, r *http.Request) {
+-		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/classic/graph"), http.StatusFound)
++		http.Redirect(w, r, path.Join(o.ExternalURL.Path, homePage), http.StatusFound)
+ 	})
+ 
+-	// Redirect the original React UI's path (under "/new") to its new path at the root.
+-	router.Get("/new/*path", func(w http.ResponseWriter, r *http.Request) {
+-		p := route.Param(r.Context(), "path")
+-		http.Redirect(w, r, path.Join(o.ExternalURL.Path, p)+"?"+r.URL.RawQuery, http.StatusFound)
++	// Catch requests to legacy URLs that would try to hit the "new" web UI
++	router.Get("/graph/", func(w http.ResponseWriter, r *http.Request) {
++		http.Redirect(w, r, path.Join(o.ExternalURL.Path, "/classic/graph")+"?"+r.URL.RawQuery, http.StatusFound)
+ 	})
+ 
+ 	router.Get("/classic/agent", readyf(h.agent))
+@@ -408,59 +382,6 @@
+ 
+ 	router.Get("/consoles/*filepath", readyf(h.consoles))
+ 
+-	serveReactApp := func(w http.ResponseWriter, r *http.Request) {
+-		f, err := ui.Assets.Open("/static/react/index.html")
+-		if err != nil {
+-			w.WriteHeader(http.StatusInternalServerError)
+-			fmt.Fprintf(w, "Error opening React index.html: %v", err)
+-			return
+-		}
+-		defer func() { _ = f.Close() }()
+-		idx, err := ioutil.ReadAll(f)
+-		if err != nil {
+-			w.WriteHeader(http.StatusInternalServerError)
+-			fmt.Fprintf(w, "Error reading React index.html: %v", err)
+-			return
+-		}
+-		replacedIdx := bytes.ReplaceAll(idx, []byte("CONSOLES_LINK_PLACEHOLDER"), []byte(h.consolesPath()))
+-		replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("TITLE_PLACEHOLDER"), []byte(h.options.PageTitle))
+-		replacedIdx = bytes.ReplaceAll(replacedIdx, []byte("AGENT_MODE_PLACEHOLDER"), []byte(strconv.FormatBool(h.options.IsAgent)))
+-		w.Write(replacedIdx)
+-	}
+-
+-	// Serve the React app.
+-	for _, p := range reactRouterPaths {
+-		router.Get(p, serveReactApp)
+-	}
+-
+-	if h.options.IsAgent {
+-		for _, p := range reactRouterAgentPaths {
+-			router.Get(p, serveReactApp)
+-		}
+-	} else {
+-		for _, p := range reactRouterServerPaths {
+-			router.Get(p, serveReactApp)
+-		}
+-	}
+-
+-	// The favicon and manifest are bundled as part of the React app, but we want to serve
+-	// them on the root.
+-	for _, p := range []string{"/favicon.ico", "/manifest.json"} {
+-		assetPath := "/static/react" + p
+-		router.Get(p, func(w http.ResponseWriter, r *http.Request) {
+-			r.URL.Path = assetPath
+-			fs := server.StaticFileServer(ui.Assets)
+-			fs.ServeHTTP(w, r)
+-		})
+-	}
+-
+-	// Static files required by the React app.
+-	router.Get("/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
+-		r.URL.Path = path.Join("/static/react/static", route.Param(r.Context(), "filepath"))
+-		fs := server.StaticFileServer(ui.Assets)
+-		fs.ServeHTTP(w, r)
+-	})
+-
+ 	if o.UserAssetsPath != "" {
+ 		router.Get("/user/*filepath", route.FileServe(o.UserAssetsPath))
+ 	}
diff -pruN 2.31.2+ds1-1/debian/patches/11-Set_temporary_storage_path_for_tsdb.patch 2.33.5+ds1-2/debian/patches/11-Set_temporary_storage_path_for_tsdb.patch
--- 2.31.2+ds1-1/debian/patches/11-Set_temporary_storage_path_for_tsdb.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/11-Set_temporary_storage_path_for_tsdb.patch	1970-01-01 00:00:00.000000000 +0000
@@ -1,40 +0,0 @@
-From: Lucas Kanashiro <kanashiro@debian.org>
-Date: Fri, 5 Jun 2020 18:31:02 -0300
-Subject: Set temporary storage path for tsdb
-
-Those tests fail because they try to use the default path which is
-/var/lib/prometheus/metrics2 (created during installation). Use a
-temporary path.
----
- cmd/prometheus/main_test.go |    6 +++---
- 1 file changed, 3 insertions(+), 3 deletions(-)
-
---- a/cmd/prometheus/main_test.go
-+++ b/cmd/prometheus/main_test.go
-@@ -114,7 +114,7 @@ func TestFailedStartupExitCode(t *testin
- 	fakeInputFile := "fake-input-file"
- 	expectedExitStatus := 2
- 
--	prom := exec.Command(promPath, "-test.main", "--config.file="+fakeInputFile)
-+	prom := exec.Command(promPath, "-test.main", "--storage.tsdb.path="+filepath.Join(os.TempDir(), "data"), "--config.file="+fakeInputFile)
- 	err := prom.Run()
- 	require.Error(t, err)
- 
-@@ -202,7 +202,7 @@ func TestWALSegmentSizeBounds(t *testing
- 	}
- 
- 	for size, expectedExitStatus := range map[string]int{"9MB": 1, "257MB": 1, "10": 2, "1GB": 1, "12MB": 0} {
--		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
-+		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.wal-segment-size="+size, "--storage.tsdb.path="+promData, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
- 
- 		// Log stderr in case of failure.
- 		stderr, err := prom.StderrPipe()
-@@ -244,7 +244,7 @@ func TestMaxBlockChunkSegmentSizeBounds(
- 	}
- 
- 	for size, expectedExitStatus := range map[string]int{"512KB": 1, "1MB": 0} {
--		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
-+		prom := exec.Command(promPath, "-test.main", "--storage.tsdb.max-block-chunk-segment-size="+size, "--storage.tsdb.path="+promData, "--web.listen-address=0.0.0.0:0", "--config.file="+promConfig)
- 
- 		// Log stderr in case of failure.
- 		stderr, err := prom.StderrPipe()
diff -pruN 2.31.2+ds1-1/debian/patches/12-Do_not_embed_blobs.patch 2.33.5+ds1-2/debian/patches/12-Do_not_embed_blobs.patch
--- 2.31.2+ds1-1/debian/patches/12-Do_not_embed_blobs.patch	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/12-Do_not_embed_blobs.patch	2022-03-23 22:28:20.000000000 +0000
@@ -0,0 +1,210 @@
+From: Martina Ferrari <tina@debian.org>
+Date: Sat, 20 Jun 2020 15:32:33 -0300
+Subject: Do not embed blobs
+Forwarded: not-needed
+Last-Updated: Mon, 14 Mar 2022 23:20:08 +0000
+
+Avoid embedding blobs into the prometheus binary; instead, use files
+installed on disk.
+---
+---
+ cmd/prometheus/main.go      |    3 +
+ console_libraries/prom.lib  |   17 ++++-----
+ web/ui/doc.go               |    2 +
+ web/ui/templates/_base.html |   11 ++----
+ web/ui/templates/graph.html |   22 +++++-------
+ web/ui/ui.go                |    3 -
+ web/web.go                  |   78 +++-----------------------------------------
+ web/web_test.go             |    1 
+ 8 files changed, 37 insertions(+), 100 deletions(-)
+
+--- a/cmd/prometheus/main.go
++++ b/cmd/prometheus/main.go
+@@ -246,6 +246,9 @@
+ 		"Prefix for the internal routes of web endpoints. Defaults to path of --web.external-url.").
+ 		PlaceHolder("<path>").StringVar(&cfg.web.RoutePrefix)
+ 
++	a.Flag("web.local-assets", "Path to static asset/templates directory.").
++		Default("/usr/share/prometheus/web/").StringVar(&cfg.web.LocalAssets)
++
+ 	a.Flag("web.user-assets", "Path to user asset directory, available at /user.").
+ 		PlaceHolder("<path>").StringVar(&cfg.web.UserAssetsPath)
+ 
+--- a/console_libraries/prom.lib
++++ b/console_libraries/prom.lib
+@@ -1,16 +1,15 @@
+ {{/* vim: set ft=html: */}}
+ {{/* Load Prometheus console library JS/CSS. Should go in <head> */}}
+ {{ define "prom_console_head" }}
+-<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.css">
+-<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css">
++<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/rickshaw/rickshaw.min.css">
++<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/bootstrap4/css/bootstrap.min.css">
+ <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/css/prom_console.css">
+-<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css">
+-<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.v3.js"></script>
+-<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.layout.min.js"></script>
+-<script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.js"></script>
+-<script src="{{ pathPrefix }}/classic/static/vendor/js/jquery-3.5.1.min.js"></script>
+-<script src="{{ pathPrefix }}/classic/static/vendor/js/popper.min.js"></script>
+-<script src="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js"></script>
++<link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css">
++<script src="{{ pathPrefix }}/classic/static/d3.min.js"></script>
++<script src="{{ pathPrefix }}/classic/static/rickshaw/rickshaw.min.js"></script>
++<script src="{{ pathPrefix }}/classic/static/jquery/jquery.min.js"></script>
++<script src="{{ pathPrefix }}/classic/static/popper.js/popper.min.js"></script>
++<script src="{{ pathPrefix }}/classic/static/bootstrap4/js/bootstrap.min.js"></script>
+ 
+ <script>
+ var PATH_PREFIX = "{{ pathPrefix }}";
+--- a/web/ui/templates/graph.html
++++ b/web/ui/templates/graph.html
+@@ -1,19 +1,18 @@
+ {{define "head"}}
+-    <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.css?v={{ buildVersion }}">
+-    <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.css?v={{ buildVersion }}">
++    <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/rickshaw/rickshaw.min.css?v={{ buildVersion }}">
++    <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.css?v={{ buildVersion }}">
+ 
+-    <script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.v3.js?v={{ buildVersion }}"></script>
+-    <script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/vendor/d3.layout.min.js?v={{ buildVersion }}"></script>
+-    <script src="{{ pathPrefix }}/classic/static/vendor/rickshaw/rickshaw.min.js?v={{ buildVersion }}"></script>
+-    <script src="{{ pathPrefix }}/classic/static/vendor/moment/moment.min.js?v={{ buildVersion }}"></script>
+-    <script src="{{ pathPrefix }}/classic/static/vendor/moment/moment-timezone-with-data.min.js?v={{ buildVersion }}"></script>
+-    <script src="{{ pathPrefix }}/classic/static/vendor/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.js?v={{ buildVersion }}"></script>
+-    <script src="{{ pathPrefix }}/classic/static/vendor/bootstrap3-typeahead/bootstrap3-typeahead.min.js?v={{ buildVersion }}"></script>
++    <script src="{{ pathPrefix }}/classic/static/d3/d3.min.js?v={{ buildVersion }}"></script>
++    <script src="{{ pathPrefix }}/classic/static/rickshaw/rickshaw.min.js?v={{ buildVersion }}"></script>
++    <script src="{{ pathPrefix }}/classic/static/moment/moment.min.js?v={{ buildVersion }}"></script>
++    <script src="{{ pathPrefix }}/classic/static/moment-timezone/moment-timezone-with-data.min.js?v={{ buildVersion }}"></script>
++    <script src="{{ pathPrefix }}/classic/static/eonasdan-bootstrap-datetimepicker/bootstrap-datetimepicker.min.js?v={{ buildVersion }}"></script>
++    <script src="{{ pathPrefix }}/classic/static/vendor/bootstrap3-typeahead/bootstrap3-typeahead.js?v={{ buildVersion }}"></script>
+     <script src="{{ pathPrefix }}/classic/static/vendor/fuzzy/fuzzy.js?v={{ buildVersion }}"></script>
+ 
+-    <script src="{{ pathPrefix }}/classic/static/vendor/mustache/mustache.min.js?v={{ buildVersion }}"></script>
++    <script src="{{ pathPrefix }}/classic/static/mustache/mustache.min.js?v={{ buildVersion }}"></script>
+     <script src="{{ pathPrefix }}/classic/static/vendor/js/jquery.selection.js?v={{ buildVersion }}"></script>
+-    <!-- <script src="{{ pathPrefix }}/classic/static/vendor/js/jquery.hotkeys.js?v={{ buildVersion }}"></script> -->
++    <!-- <script src="{{ pathPrefix }}/classic/static/jquery-hotkeys/jquery.hotkeys.js?v={{ buildVersion }}"></script> -->
+ 
+     <script src="{{ pathPrefix }}/classic/static/js/graph/index.js?v={{ buildVersion }}"></script>
+ 
+@@ -29,7 +28,6 @@
+           <i class="glyphicon glyphicon-unchecked"></i>
+           <button type="button" class="search-history" title="search previous queries">Enable query history</button>
+         </div>
+-        <button type="button" class="btn btn-link btn-sm new_ui_button" onclick="window.location.pathname='{{ pathPrefix }}/graph'">Back to the new UI</button>
+       </div>
+     </div>
+ 
+--- a/web/web.go
++++ b/web/web.go
+@@ -48,7 +48,6 @@
+ 	io_prometheus_client "github.com/prometheus/client_model/go"
+ 	"github.com/prometheus/common/model"
+ 	"github.com/prometheus/common/route"
+-	"github.com/prometheus/common/server"
+ 	toolkit_web "github.com/prometheus/exporter-toolkit/web"
+ 	"go.uber.org/atomic"
+ 	"golang.org/x/net/netutil"
+@@ -64,7 +63,6 @@
+ 	"github.com/prometheus/prometheus/tsdb/index"
+ 	"github.com/prometheus/prometheus/util/httputil"
+ 	api_v1 "github.com/prometheus/prometheus/web/api/v1"
+-	"github.com/prometheus/prometheus/web/ui"
+ )
+ 
+ // withStackTrace logs the stack trace in case the request panics. The function
+@@ -222,7 +220,7 @@
+ 	MaxConnections             int
+ 	ExternalURL                *url.URL
+ 	RoutePrefix                string
+-	UseLocalAssets             bool
++	LocalAssets                string
+ 	UserAssetsPath             string
+ 	ConsoleTemplatesPath       string
+ 	ConsoleLibrariesPath       string
+@@ -358,11 +356,7 @@
+ 	router.Get("/classic/rules", readyf(h.rules))
+ 	router.Get("/classic/targets", readyf(h.targets))
+ 	router.Get("/classic/service-discovery", readyf(h.serviceDiscovery))
+-	router.Get("/classic/static/*filepath", func(w http.ResponseWriter, r *http.Request) {
+-		r.URL.Path = path.Join("/static", route.Param(r.Context(), "filepath"))
+-		fs := server.StaticFileServer(ui.Assets)
+-		fs.ServeHTTP(w, r)
+-	})
++	router.Get("/classic/static/*filepath", route.FileServe(path.Join(o.LocalAssets, "/static")))
+ 	// Make sure that "<path-prefix>/classic" is redirected to "<path-prefix>/classic/" and
+ 	// not just the naked "/classic/", which would be the default behavior of the router
+ 	// with the "RedirectTrailingSlash" option (https://pkg.go.dev/github.com/julienschmidt/httprouter#Router.RedirectTrailingSlash),
+@@ -1029,7 +1023,7 @@
+ 	var tmpl string
+ 
+ 	appendf := func(name string) error {
+-		f, err := ui.Assets.Open(path.Join("/templates", name))
++		f, err := os.Open(filepath.Join(h.options.LocalAssets, "templates", name))
+ 		if err != nil {
+ 			return err
+ 		}
+--- a/web/web_test.go
++++ b/web/web_test.go
+@@ -142,6 +142,7 @@
+ 		},
+ 		Version:  &PrometheusVersion{},
+ 		Gatherer: prometheus.DefaultGatherer,
++		LocalAssets:    "../../../../../../web/ui",
+ 	}
+ 
+ 	opts.Flags = map[string]string{}
+--- a/web/ui/templates/_base.html
++++ b/web/ui/templates/_base.html
+@@ -5,13 +5,13 @@
+         <meta name="robots" content="noindex,nofollow">
+         <title>{{ pageTitle }}</title>
+         <link rel="shortcut icon" href="{{ pathPrefix }}/classic/static/img/favicon.ico?v={{ buildVersion }}">
+-        <script src="{{ pathPrefix }}/classic/static/vendor/js/jquery-3.5.1.min.js?v={{ buildVersion }}"></script>
+-        <script src="{{ pathPrefix }}/classic/static/vendor/js/popper.min.js?v={{ buildVersion }}"></script>
+-        <script src="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/js/bootstrap.min.js?v={{ buildVersion }}"></script>
++        <script src="{{ pathPrefix }}/classic/static/jquery/jquery.min.js?v={{ buildVersion }}"></script>
++        <script src="{{ pathPrefix }}/classic/static/popper.js/popper.min.js?v={{ buildVersion }}"></script>
++        <script src="{{ pathPrefix }}/classic/static/bootstrap4/js/bootstrap.min.js?v={{ buildVersion }}"></script>
+ 
+-        <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap-4.5.2/css/bootstrap.min.css?v={{ buildVersion }}">
++        <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/bootstrap4/css/bootstrap.min.css?v={{ buildVersion }}">
+         <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/css/prometheus.css?v={{ buildVersion }}">
+-        <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.min.css?v={{ buildVersion }}">
++        <link type="text/css" rel="stylesheet" href="{{ pathPrefix }}/classic/static/vendor/bootstrap4-glyphicons/css/bootstrap-glyphicons.css?v={{ buildVersion }}">
+ 
+         <script>
+             var PATH_PREFIX = "{{ pathPrefix }}";
+@@ -61,7 +61,6 @@
+                         <li class= "nav-item">
+                             <a class ="nav-link" href="https://prometheus.io/docs/prometheus/latest/getting_started/" target="_blank">Help</a>
+                         </li>
+-                        <li class="nav-item"><a class="nav-link" href="{{ pathPrefix }}/graph">New UI</a></li>
+                     </ul>
+                 </div>
+             </div>
+--- a/web/ui/doc.go
++++ b/web/ui/doc.go
+@@ -11,6 +11,8 @@
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+ 
++// +build ignore
++
+ // Package ui provides the assets via a virtual filesystem.
+ package ui
+ 
+--- a/web/ui/ui.go
++++ b/web/ui/ui.go
+@@ -11,8 +11,7 @@
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+ 
+-//go:build !builtinassets
+-// +build !builtinassets
++// +build ignore
+ 
+ package ui
+ 
diff -pruN 2.31.2+ds1-1/debian/patches/13-Disable_jaeger.patch 2.33.5+ds1-2/debian/patches/13-Disable_jaeger.patch
--- 2.31.2+ds1-1/debian/patches/13-Disable_jaeger.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/13-Disable_jaeger.patch	2022-03-23 22:28:20.000000000 +0000
@@ -4,7 +4,7 @@ Description: Disable jaeger tracing
  for the use of Prometheus developers, and will not be a long term feature.
 Author: Daniel Swarbrick <daniel.swarbrick@cloud.ionos.com>
 Forwarded: not-needed
-Last-Update: 2020-07-04
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 ---
 ---
  cmd/prometheus/main.go |   52 -------------------------------------------------
@@ -13,15 +13,15 @@ Last-Update: 2020-07-04
 
 --- a/promql/engine.go
 +++ b/promql/engine.go
-@@ -33,7 +33,6 @@ import (
+@@ -33,7 +33,6 @@
  	"github.com/pkg/errors"
  	"github.com/prometheus/client_golang/prometheus"
  	"github.com/prometheus/common/model"
 -	"github.com/uber/jaeger-client-go"
  
- 	"github.com/prometheus/prometheus/pkg/labels"
- 	"github.com/prometheus/prometheus/pkg/timestamp"
-@@ -493,11 +492,6 @@ func (ng *Engine) exec(ctx context.Conte
+ 	"github.com/prometheus/prometheus/model/labels"
+ 	"github.com/prometheus/prometheus/model/timestamp"
+@@ -505,11 +504,6 @@
  				f = append(f, "error", err)
  			}
  			f = append(f, "stats", stats.NewQueryStats(q.Stats()))
@@ -35,7 +35,7 @@ Last-Update: 2020-07-04
  					f = append(f, k, v)
 --- a/cmd/prometheus/main.go
 +++ b/cmd/prometheus/main.go
-@@ -17,7 +17,6 @@ package main
+@@ -17,7 +17,6 @@
  import (
  	"context"
  	"fmt"
@@ -43,7 +43,7 @@ Last-Update: 2020-07-04
  	"math"
  	"math/bits"
  	"net"
-@@ -39,7 +38,6 @@ import (
+@@ -39,7 +38,6 @@
  	"github.com/go-kit/log/level"
  	conntrack "github.com/mwitkow/go-conntrack"
  	"github.com/oklog/run"
@@ -51,7 +51,7 @@ Last-Update: 2020-07-04
  	"github.com/pkg/errors"
  	"github.com/prometheus/client_golang/prometheus"
  	"github.com/prometheus/common/model"
-@@ -48,8 +46,6 @@ import (
+@@ -48,8 +46,6 @@
  	"github.com/prometheus/common/version"
  	toolkit_web "github.com/prometheus/exporter-toolkit/web"
  	toolkit_webflag "github.com/prometheus/exporter-toolkit/web/kingpinflag"
@@ -60,7 +60,7 @@ Last-Update: 2020-07-04
  	"go.uber.org/atomic"
  	kingpin "gopkg.in/alecthomas/kingpin.v2"
  
-@@ -651,13 +647,6 @@ func main() {
+@@ -737,13 +733,6 @@
  		})
  	}
  
@@ -74,7 +74,7 @@ Last-Update: 2020-07-04
  	listener, err := webHandler.Listener()
  	if err != nil {
  		level.Error(logger).Log("msg", "Unable to start web listener", "err", err)
-@@ -1318,47 +1307,6 @@ func (opts tsdbOptions) ToTSDBOptions()
+@@ -1517,47 +1506,6 @@
  	}
  }
  
diff -pruN 2.31.2+ds1-1/debian/patches/14-Disable_wal_test_goleak.patch 2.33.5+ds1-2/debian/patches/14-Disable_wal_test_goleak.patch
--- 2.31.2+ds1-1/debian/patches/14-Disable_wal_test_goleak.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/14-Disable_wal_test_goleak.patch	2022-03-23 22:28:20.000000000 +0000
@@ -3,6 +3,7 @@ Description: Disable goleak test in WAL
 Author: Daniel Swarbrick <daniel.swarbrick@cloud.ionos.com>
 Forwarded: not-needed
 Bug: https://github.com/prometheus/prometheus/issues/7672
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 ---
 ---
  tsdb/db_test.go      |    5 -----
@@ -11,19 +12,32 @@ Bug: https://github.com/prometheus/prome
 
 --- a/tsdb/db_test.go
 +++ b/tsdb/db_test.go
-@@ -38,7 +38,6 @@ import (
+@@ -17,7 +17,6 @@
+ 	"bufio"
+ 	"context"
+ 	"encoding/binary"
+-	"flag"
+ 	"fmt"
+ 	"hash/crc32"
+ 	"io/ioutil"
+@@ -39,7 +38,6 @@
  	"github.com/prometheus/client_golang/prometheus"
  	prom_testutil "github.com/prometheus/client_golang/prometheus/testutil"
  	"github.com/stretchr/testify/require"
 -	"go.uber.org/goleak"
  
- 	"github.com/prometheus/prometheus/pkg/labels"
+ 	"github.com/prometheus/prometheus/model/labels"
  	"github.com/prometheus/prometheus/storage"
-@@ -53,10 +52,6 @@ import (
+@@ -54,15 +52,6 @@
  	"github.com/prometheus/prometheus/util/testutil"
  )
  
 -func TestMain(m *testing.M) {
+-	var isolationEnabled bool
+-	flag.BoolVar(&isolationEnabled, "test.tsdb-isolation", true, "enable isolation")
+-	flag.Parse()
+-	defaultIsolationDisabled = !isolationEnabled
+-
 -	goleak.VerifyTestMain(m, goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func1"), goleak.IgnoreTopFunction("github.com/prometheus/prometheus/tsdb.(*SegmentWAL).cut.func2"))
 -}
 -
@@ -32,7 +46,7 @@ Bug: https://github.com/prometheus/prome
  	require.NoError(t, err)
 --- a/tsdb/wal/wal_test.go
 +++ b/tsdb/wal/wal_test.go
-@@ -26,16 +26,11 @@ import (
+@@ -25,16 +25,11 @@
  
  	client_testutil "github.com/prometheus/client_golang/prometheus/testutil"
  	"github.com/stretchr/testify/require"
diff -pruN 2.31.2+ds1-1/debian/patches/15-Disable-uyuni.patch 2.33.5+ds1-2/debian/patches/15-Disable-uyuni.patch
--- 2.31.2+ds1-1/debian/patches/15-Disable-uyuni.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/15-Disable-uyuni.patch	2022-03-23 22:28:20.000000000 +0000
@@ -2,6 +2,7 @@ Author: Guillem Jover <gjover@sipwise.co
 Forwarded: not-needed
 Description: Disable uyuni discovery module
  This requires modules not present in Debian.
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 
 ---
  config/config_test.go         |   23 +----------------------
@@ -12,15 +13,15 @@ Description: Disable uyuni discovery mod
 
 --- a/config/config_test.go
 +++ b/config/config_test.go
-@@ -48,7 +48,6 @@ import (
+@@ -48,7 +48,6 @@
  	"github.com/prometheus/prometheus/discovery/scaleway"
  	"github.com/prometheus/prometheus/discovery/targetgroup"
  	"github.com/prometheus/prometheus/discovery/triton"
 -	"github.com/prometheus/prometheus/discovery/uyuni"
  	"github.com/prometheus/prometheus/discovery/xds"
  	"github.com/prometheus/prometheus/discovery/zookeeper"
- 	"github.com/prometheus/prometheus/pkg/labels"
-@@ -879,26 +878,6 @@ var expectedConf = &Config{
+ 	"github.com/prometheus/prometheus/model/labels"
+@@ -881,26 +880,6 @@
  				},
  			},
  		},
@@ -35,7 +36,7 @@ Description: Disable uyuni discovery mod
 -			Scheme:           DefaultScrapeConfig.Scheme,
 -			ServiceDiscoveryConfigs: discovery.Configs{
 -				&uyuni.SDConfig{
--					Server:          kubernetesSDHostURL(),
+-					Server:          "https://localhost:1234",
 -					Username:        "gopher",
 -					Password:        "hole",
 -					Entitlement:     "monitoring_entitled",
@@ -47,7 +48,7 @@ Description: Disable uyuni discovery mod
  	},
  	AlertingConfig: AlertingConfig{
  		AlertmanagerConfigs: []*AlertmanagerConfig{
-@@ -983,7 +962,7 @@ func TestElideSecrets(t *testing.T) {
+@@ -985,7 +964,7 @@
  	yamlConfig := string(config)
  
  	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
@@ -56,9 +57,20 @@ Description: Disable uyuni discovery mod
  	require.NotContains(t, yamlConfig, "mysecret",
  		"yaml marshal reveals authentication credentials.")
  }
+@@ -1289,10 +1268,6 @@
+ 		filename: "empty_scrape_config_action.bad.yml",
+ 		errMsg:   "relabel action cannot be empty",
+ 	},
+-	{
+-		filename: "uyuni_no_server.bad.yml",
+-		errMsg:   "Uyuni SD configuration requires server host",
+-	},
+ }
+ 
+ func TestBadConfigs(t *testing.T) {
 --- a/config/testdata/conf.good.yml
 +++ b/config/testdata/conf.good.yml
-@@ -326,12 +326,6 @@ scrape_configs:
+@@ -326,12 +326,6 @@
        - authorization:
            credentials: abcdef
  
@@ -73,7 +85,7 @@ Description: Disable uyuni discovery mod
      - scheme: https
 --- a/discovery/install/install.go
 +++ b/discovery/install/install.go
-@@ -33,7 +33,6 @@ import (
+@@ -33,7 +33,6 @@
  	_ "github.com/prometheus/prometheus/discovery/puppetdb"     // register puppetdb
  	_ "github.com/prometheus/prometheus/discovery/scaleway"     // register scaleway
  	_ "github.com/prometheus/prometheus/discovery/triton"       // register triton
@@ -86,6 +98,17 @@ Description: Disable uyuni discovery mod
 @@ -11,6 +11,8 @@
  // See the License for the specific language governing permissions and
  // limitations under the License.
+ 
++// +build ignore
++
+ package uyuni
+ 
+ import (
+--- a/discovery/uyuni/uyuni_test.go
++++ b/discovery/uyuni/uyuni_test.go
+@@ -11,6 +11,8 @@
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
  
 +// +build ignore
 +
diff -pruN 2.31.2+ds1-1/debian/patches/16-Disable_xds.patch 2.33.5+ds1-2/debian/patches/16-Disable_xds.patch
--- 2.31.2+ds1-1/debian/patches/16-Disable_xds.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/16-Disable_xds.patch	2022-03-23 22:28:20.000000000 +0000
@@ -2,6 +2,7 @@ Author: Guillem Jover <gjover@sipwise.co
 Forwarded: not-needed
 Description: Disable xds discovery module
  This requires modules not present in Debian.
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 
 ---
  config/config_test.go              |   21 ---------------------
@@ -19,15 +20,15 @@ Description: Disable xds discovery modul
 
 --- a/config/config_test.go
 +++ b/config/config_test.go
-@@ -48,7 +48,6 @@ import (
+@@ -48,7 +48,6 @@
  	"github.com/prometheus/prometheus/discovery/scaleway"
  	"github.com/prometheus/prometheus/discovery/targetgroup"
  	"github.com/prometheus/prometheus/discovery/triton"
 -	"github.com/prometheus/prometheus/discovery/xds"
  	"github.com/prometheus/prometheus/discovery/zookeeper"
- 	"github.com/prometheus/prometheus/pkg/labels"
- 	"github.com/prometheus/prometheus/pkg/relabel"
-@@ -385,26 +384,6 @@ var expectedConf = &Config{
+ 	"github.com/prometheus/prometheus/model/labels"
+ 	"github.com/prometheus/prometheus/model/relabel"
+@@ -384,26 +383,6 @@
  			},
  		},
  		{
@@ -56,7 +57,7 @@ Description: Disable xds discovery modul
  			HonorTimestamps: true,
 --- a/discovery/install/install.go
 +++ b/discovery/install/install.go
-@@ -33,6 +33,5 @@ import (
+@@ -33,6 +33,5 @@
  	_ "github.com/prometheus/prometheus/discovery/puppetdb"     // register puppetdb
  	_ "github.com/prometheus/prometheus/discovery/scaleway"     // register scaleway
  	_ "github.com/prometheus/prometheus/discovery/triton"       // register triton
@@ -142,7 +143,7 @@ Description: Disable xds discovery modul
  import (
 --- a/config/testdata/conf.good.yml
 +++ b/config/testdata/conf.good.yml
-@@ -169,11 +169,6 @@ scrape_configs:
+@@ -169,11 +169,6 @@
      authorization:
        credentials: mysecret
  
@@ -156,7 +157,7 @@ Description: Disable xds discovery modul
        - servers:
 --- a/config/testdata/roundtrip.good.yml
 +++ b/config/testdata/roundtrip.good.yml
-@@ -88,9 +88,6 @@ scrape_configs:
+@@ -88,9 +88,6 @@
        - files:
            - single/file.yml
  
diff -pruN 2.31.2+ds1-1/debian/patches/17-Disable-linode.patch 2.33.5+ds1-2/debian/patches/17-Disable-linode.patch
--- 2.31.2+ds1-1/debian/patches/17-Disable-linode.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/17-Disable-linode.patch	2022-03-23 22:28:20.000000000 +0000
@@ -2,6 +2,7 @@ Author: Guillem Jover <gjover@sipwise.co
 Forwarded: not-needed
 Description: Disable linode discovery module
  This requires modules not present in Debian.
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 
 ---
  config/config_test.go           |   27 ---------------------------
@@ -14,7 +15,7 @@ Description: Disable linode discovery mo
 
 --- a/discovery/install/install.go
 +++ b/discovery/install/install.go
-@@ -26,7 +26,6 @@ import (
+@@ -26,7 +26,6 @@
  	_ "github.com/prometheus/prometheus/discovery/gce"          // register gce
  	_ "github.com/prometheus/prometheus/discovery/hetzner"      // register hetzner
  	_ "github.com/prometheus/prometheus/discovery/http"         // register http
@@ -57,7 +58,7 @@ Description: Disable linode discovery mo
  import (
 --- a/config/config_test.go
 +++ b/config/config_test.go
-@@ -40,7 +40,6 @@ import (
+@@ -40,7 +40,6 @@
  	"github.com/prometheus/prometheus/discovery/file"
  	"github.com/prometheus/prometheus/discovery/hetzner"
  	"github.com/prometheus/prometheus/discovery/http"
@@ -65,7 +66,7 @@ Description: Disable linode discovery mo
  	"github.com/prometheus/prometheus/discovery/marathon"
  	"github.com/prometheus/prometheus/discovery/moby"
  	"github.com/prometheus/prometheus/discovery/openstack"
-@@ -831,32 +830,6 @@ var expectedConf = &Config{
+@@ -833,32 +832,6 @@
  				},
  			},
  		},
@@ -100,7 +101,7 @@ Description: Disable linode discovery mo
  		AlertmanagerConfigs: []*AlertmanagerConfig{
 --- a/config/testdata/conf.good.yml
 +++ b/config/testdata/conf.good.yml
-@@ -316,11 +316,6 @@ scrape_configs:
+@@ -316,11 +316,6 @@
          access_key: SCWXXXXXXXXXXXXXXXXX
          secret_key: 11111111-1111-1111-1111-111111111111
  
diff -pruN 2.31.2+ds1-1/debian/patches/18-Disable-scaleway.patch 2.33.5+ds1-2/debian/patches/18-Disable-scaleway.patch
--- 2.31.2+ds1-1/debian/patches/18-Disable-scaleway.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/18-Disable-scaleway.patch	2022-03-23 22:28:20.000000000 +0000
@@ -2,6 +2,7 @@ Author: Guillem Jover <gjover@sipwise.co
 Forwarded: not-needed
 Description: Disable scaleway discovery module
  This requires modules not present in Debian.
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 
 ---
  config/config_test.go               |   51 ------------------------------------
@@ -15,7 +16,7 @@ Description: Disable scaleway discovery
 
 --- a/discovery/install/install.go
 +++ b/discovery/install/install.go
-@@ -30,7 +30,6 @@ import (
+@@ -30,7 +30,6 @@
  	_ "github.com/prometheus/prometheus/discovery/moby"         // register moby
  	_ "github.com/prometheus/prometheus/discovery/openstack"    // register openstack
  	_ "github.com/prometheus/prometheus/discovery/puppetdb"     // register puppetdb
@@ -69,7 +70,7 @@ Description: Disable scaleway discovery
  import (
 --- a/config/config_test.go
 +++ b/config/config_test.go
-@@ -44,7 +44,6 @@ import (
+@@ -44,7 +44,6 @@
  	"github.com/prometheus/prometheus/discovery/moby"
  	"github.com/prometheus/prometheus/discovery/openstack"
  	"github.com/prometheus/prometheus/discovery/puppetdb"
@@ -77,7 +78,7 @@ Description: Disable scaleway discovery
  	"github.com/prometheus/prometheus/discovery/targetgroup"
  	"github.com/prometheus/prometheus/discovery/triton"
  	"github.com/prometheus/prometheus/discovery/zookeeper"
-@@ -794,42 +793,6 @@ var expectedConf = &Config{
+@@ -796,42 +795,6 @@
  				},
  			},
  		},
@@ -120,7 +121,7 @@ Description: Disable scaleway discovery
  	},
  	AlertingConfig: AlertingConfig{
  		AlertmanagerConfigs: []*AlertmanagerConfig{
-@@ -914,7 +877,7 @@ func TestElideSecrets(t *testing.T) {
+@@ -916,7 +879,7 @@
  	yamlConfig := string(config)
  
  	matches := secretRe.FindAllStringIndex(yamlConfig, -1)
@@ -129,7 +130,7 @@ Description: Disable scaleway discovery
  	require.NotContains(t, yamlConfig, "mysecret",
  		"yaml marshal reveals authentication credentials.")
  }
-@@ -1147,18 +1110,6 @@ var expectedErrors = []struct {
+@@ -1189,18 +1152,6 @@
  		errMsg:   "invalid eureka server URL",
  	},
  	{
@@ -150,7 +151,7 @@ Description: Disable scaleway discovery
  	},
 --- a/config/testdata/conf.good.yml
 +++ b/config/testdata/conf.good.yml
-@@ -305,17 +305,6 @@ scrape_configs:
+@@ -305,17 +305,6 @@
      eureka_sd_configs:
        - server: "http://eureka.example.com:8761/eureka"
  
diff -pruN 2.31.2+ds1-1/debian/patches/19-Integer_overflows.patch 2.33.5+ds1-2/debian/patches/19-Integer_overflows.patch
--- 2.31.2+ds1-1/debian/patches/19-Integer_overflows.patch	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/19-Integer_overflows.patch	2022-03-23 22:28:20.000000000 +0000
@@ -0,0 +1,140 @@
+From: Martina Ferrari <tina@debian.org>
+Date: Wed, 23 Mar 2022 19:35:46 +0000
+Subject: Avoid literal integer overflows in 32 bit arches.
+Forwarded: https://github.com/prometheus/prometheus/issues/10481
+Last-Updated: Wed, 23 Mar 2022 19:35:46 +0000
+
+--- a/promql/functions_test.go
++++ b/promql/functions_test.go
+@@ -43,13 +43,14 @@
+ 
+ 	a := storage.Appender(context.Background())
+ 
++	var start, interval, i int64
+ 	metric := labels.FromStrings("__name__", "foo")
+-	start := 1493712816939
+-	interval := 30 * 1000
++	start = 1493712816939
++	interval = 30 * 1000
+ 	// Introduce some timestamp jitter to test 0 slope case.
+ 	// https://github.com/prometheus/prometheus/issues/7180
+-	for i := 0; i < 15; i++ {
+-		jitter := 12 * i % 2
++	for i = 0; i < 15; i++ {
++		var jitter int64 = 12 * i % 2
+ 		a.Append(0, metric, int64(start+interval*i+jitter), 1)
+ 	}
+ 
+--- a/template/template_test.go
++++ b/template/template_test.go
+@@ -281,13 +281,13 @@
+ 		{
+ 			// Humanize - int.
+ 			text:   "{{ range . }}{{ humanize . }}:{{ end }}",
+-			input:  []int{0, -1, 1, 1234567, math.MaxInt64},
++			input:  []int64{0, -1, 1, 1234567, math.MaxInt64},
+ 			output: "0:-1:1:1.235M:9.223E:",
+ 		},
+ 		{
+ 			// Humanize - uint.
+ 			text:   "{{ range . }}{{ humanize . }}:{{ end }}",
+-			input:  []uint{0, 1, 1234567, math.MaxUint64},
++			input:  []uint64{0, 1, 1234567, math.MaxUint64},
+ 			output: "0:1:1.235M:18.45E:",
+ 		},
+ 		{
+@@ -311,13 +311,13 @@
+ 		{
+ 			// Humanize1024 - int.
+ 			text:   "{{ range . }}{{ humanize1024 . }}:{{ end }}",
+-			input:  []int{0, -1, 1, 1234567, math.MaxInt64},
++			input:  []int64{0, -1, 1, 1234567, math.MaxInt64},
+ 			output: "0:-1:1:1.177Mi:8Ei:",
+ 		},
+ 		{
+ 			// Humanize1024 - uint.
+ 			text:   "{{ range . }}{{ humanize1024 . }}:{{ end }}",
+-			input:  []uint{0, 1, 1234567, math.MaxUint64},
++			input:  []uint64{0, 1, 1234567, math.MaxUint64},
+ 			output: "0:1:1.177Mi:16Ei:",
+ 		},
+ 		{
+@@ -353,14 +353,14 @@
+ 		{
+ 			// HumanizeDuration - int.
+ 			text:   "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
+-			input:  []int{0, -1, 1, 1234567, math.MaxInt64},
+-			output: "0s:-1s:1s:14d 6h 56m 7s:-106751991167300d -15h -30m -8s:",
++			input:  []int{0, -1, 1, 1234567, math.MaxInt32},
++			output: "0s:-1s:1s:14d 6h 56m 7s:24855d 3h 14m 7s:",
+ 		},
+ 		{
+ 			// HumanizeDuration - uint.
+ 			text:   "{{ range . }}{{ humanizeDuration . }}:{{ end }}",
+-			input:  []uint{0, 1, 1234567, math.MaxUint64},
+-			output: "0s:1s:14d 6h 56m 7s:-106751991167300d -15h -30m -8s:",
++			input:  []uint{0, 1, 1234567, math.MaxUint32},
++			output: "0s:1s:14d 6h 56m 7s:49710d 6h 28m 15s:",
+ 		},
+ 		{
+ 			// Humanize* Inf and NaN - float64.
+@@ -382,13 +382,13 @@
+ 		{
+ 			// HumanizePercentage - int.
+ 			text:   "{{ range . }}{{ humanizePercentage . }}:{{ end }}",
+-			input:  []int{0, -1, 1, 1234567, math.MaxInt64},
++			input:  []int64{0, -1, 1, 1234567, math.MaxInt64},
+ 			output: "0%:-100%:100%:1.235e+08%:9.223e+20%:",
+ 		},
+ 		{
+ 			// HumanizePercentage - uint.
+ 			text:   "{{ range . }}{{ humanizePercentage . }}:{{ end }}",
+-			input:  []uint{0, 1, 1234567, math.MaxUint64},
++			input:  []uint64{0, 1, 1234567, math.MaxUint64},
+ 			output: "0%:100%:1.235e+08%:1.845e+21%:",
+ 		},
+ 		{
+@@ -405,26 +405,26 @@
+ 		{
+ 			// HumanizeTimestamp - int.
+ 			text:   "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}",
+-			input:  []int{0, -1, 1, 1234567, 9223372036},
++			input:  []int64{0, -1, 1, 1234567, 9223372036},
+ 			output: "1970-01-01 00:00:00 +0000 UTC:1969-12-31 23:59:59 +0000 UTC:1970-01-01 00:00:01 +0000 UTC:1970-01-15 06:56:07 +0000 UTC:2262-04-11 23:47:16 +0000 UTC:",
+ 		},
+ 		{
+ 			// HumanizeTimestamp - uint.
+ 			text:   "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}",
+-			input:  []uint{0, 1, 1234567, 9223372036},
++			input:  []uint64{0, 1, 1234567, 9223372036},
+ 			output: "1970-01-01 00:00:00 +0000 UTC:1970-01-01 00:00:01 +0000 UTC:1970-01-15 06:56:07 +0000 UTC:2262-04-11 23:47:16 +0000 UTC:",
+ 		},
+ 		{
+ 			// HumanizeTimestamp - int with error.
+ 			text:       "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}",
+-			input:      []int{math.MinInt64, math.MaxInt64},
++			input:      []int64{math.MinInt64, math.MaxInt64},
+ 			shouldFail: true,
+ 			errorMsg:   `error executing template test: template: test:1:16: executing "test" at <humanizeTimestamp .>: error calling humanizeTimestamp: -9.223372036854776e+18 cannot be represented as a nanoseconds timestamp since it overflows int64`,
+ 		},
+ 		{
+ 			// HumanizeTimestamp - uint with error.
+ 			text:       "{{ range . }}{{ humanizeTimestamp . }}:{{ end }}",
+-			input:      []uint{math.MaxUint64},
++			input:      []uint64{math.MaxUint64},
+ 			shouldFail: true,
+ 			errorMsg:   `error executing template test: template: test:1:16: executing "test" at <humanizeTimestamp .>: error calling humanizeTimestamp: 1.8446744073709552e+19 cannot be represented as a nanoseconds timestamp since it overflows int64`,
+ 		},
+--- a/template/template.go
++++ b/template/template.go
+@@ -109,6 +109,10 @@
+ 		return float64(v), nil
+ 	case uint:
+ 		return float64(v), nil
++	case int64:
++		return float64(v), nil
++	case uint64:
++		return float64(v), nil
+ 	default:
+ 		return 0, fmt.Errorf("can't convert %T to float", v)
+ 	}
diff -pruN 2.31.2+ds1-1/debian/patches/91-Revert_switch_to_go_zookeeper.patch 2.33.5+ds1-2/debian/patches/91-Revert_switch_to_go_zookeeper.patch
--- 2.31.2+ds1-1/debian/patches/91-Revert_switch_to_go_zookeeper.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/91-Revert_switch_to_go_zookeeper.patch	2022-03-23 22:28:20.000000000 +0000
@@ -12,7 +12,7 @@ get uploaded.
 
 --- a/discovery/zookeeper/zookeeper.go
 +++ b/discovery/zookeeper/zookeeper.go
-@@ -23,9 +23,9 @@ import (
+@@ -23,9 +23,9 @@
  	"time"
  
  	"github.com/go-kit/log"
@@ -25,7 +25,7 @@ get uploaded.
  	"github.com/prometheus/prometheus/discovery/targetgroup"
 --- a/util/treecache/treecache.go
 +++ b/util/treecache/treecache.go
-@@ -22,9 +22,9 @@ import (
+@@ -22,9 +22,9 @@
  
  	"github.com/go-kit/log"
  	"github.com/go-kit/log/level"
diff -pruN 2.31.2+ds1-1/debian/patches/92-Revert-openstack-gophercloud-API-change.patch 2.33.5+ds1-2/debian/patches/92-Revert-openstack-gophercloud-API-change.patch
--- 2.31.2+ds1-1/debian/patches/92-Revert-openstack-gophercloud-API-change.patch	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/92-Revert-openstack-gophercloud-API-change.patch	2022-03-23 22:28:20.000000000 +0000
@@ -1,6 +1,7 @@
 Author: Guillem Jover <gjover@sipwise.com>
 Forwarded: not-needed
 Description: Revert gophercloud API update
+Last-Updated: Sat Mar 12 18:35:22 2022 +0000
 
 The needed modules is not at the required version. Can be dropped once
 it gets updated.
@@ -11,7 +12,7 @@ it gets updated.
 
 --- a/discovery/openstack/hypervisor.go
 +++ b/discovery/openstack/hypervisor.go
-@@ -74,7 +74,7 @@ func (h *HypervisorDiscovery) refresh(ct
+@@ -76,7 +76,7 @@
  	}
  	// OpenStack API reference
  	// https://developer.openstack.org/api-ref/compute/#list-hypervisors-details
diff -pruN 2.31.2+ds1-1/debian/patches/series 2.33.5+ds1-2/debian/patches/series
--- 2.31.2+ds1-1/debian/patches/series	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/patches/series	2022-03-23 22:28:20.000000000 +0000
@@ -4,13 +4,15 @@
 06-Disable_TSDB_lockfile.patch
 07-Disable_fsnotify_mips64el.patch
 09-Fix_hanging_test.patch
-11-Set_temporary_storage_path_for_tsdb.patch
-01-Do_not_embed_blobs.patch
+10-Add_agent_ui.patch
+11-Disable_react_UI.patch
+12-Do_not_embed_blobs.patch
 13-Disable_jaeger.patch
 14-Disable_wal_test_goleak.patch
 15-Disable-uyuni.patch
 16-Disable_xds.patch
 17-Disable-linode.patch
 18-Disable-scaleway.patch
+19-Integer_overflows.patch
 91-Revert_switch_to_go_zookeeper.patch
 92-Revert-openstack-gophercloud-API-change.patch
diff -pruN 2.31.2+ds1-1/debian/rules 2.33.5+ds1-2/debian/rules
--- 2.31.2+ds1-1/debian/rules	2022-01-21 00:32:14.000000000 +0000
+++ 2.33.5+ds1-2/debian/rules	2022-03-23 22:28:20.000000000 +0000
@@ -7,16 +7,21 @@ include /usr/share/dpkg/pkg-info.mk
 
 # Include test fixtures.
 export DH_GOLANG_INSTALL_EXTRA := \
+    cmd/prometheus/testdata \
+    cmd/promtool/testdata \
     config/testdata \
     discovery/file/fixtures \
     discovery/http/fixtures \
+    discovery/moby/testdata \
     discovery/puppetdb/fixtures \
+    documentation/examples/prometheus-agent.yml \
     documentation/examples/prometheus.yml \
-    pkg/rulefmt/testdata \
+    model/rulefmt/testdata \
     promql/fuzz-data \
     promql/testdata \
     rules/fixtures \
     scrape/testdata \
+    tsdb/testdata \
     # EOL
 
 # Do not build examples.
@@ -73,6 +78,9 @@ override_dh_auto_build:
 	sed -i '/^.SH "NAME"/,+1c.SH "NAME"\n'$(WHATIS1) $(BUILDDIR)/prometheus.1
 	sed -i '/^.SH "NAME"/,+1c.SH "NAME"\n'$(WHATIS2) $(BUILDDIR)/promtool.1
 
+# Avoid test failures due to newer crypto/x509 requirements until upstream
+# updates the test CA certificate.
+override_dh_auto_test: export GODEBUG := x509sha1=1
 override_dh_auto_test:
 	dh_auto_test -- $(TESTFLAGS)
 
diff -pruN 2.31.2+ds1-1/discovery/aws/ec2.go 2.33.5+ds1-2/discovery/aws/ec2.go
--- 2.31.2+ds1-1/discovery/aws/ec2.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/aws/ec2.go	2022-03-08 16:34:32.000000000 +0000
@@ -63,13 +63,11 @@ const (
 	ec2LabelSeparator         = ","
 )
 
-var (
-	// DefaultEC2SDConfig is the default EC2 SD configuration.
-	DefaultEC2SDConfig = EC2SDConfig{
-		Port:            80,
-		RefreshInterval: model.Duration(60 * time.Second),
-	}
-)
+// DefaultEC2SDConfig is the default EC2 SD configuration.
+var DefaultEC2SDConfig = EC2SDConfig{
+	Port:            80,
+	RefreshInterval: model.Duration(60 * time.Second),
+}
 
 func init() {
 	discovery.RegisterConfig(&EC2SDConfig{})
diff -pruN 2.31.2+ds1-1/discovery/aws/lightsail.go 2.33.5+ds1-2/discovery/aws/lightsail.go
--- 2.31.2+ds1-1/discovery/aws/lightsail.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/aws/lightsail.go	2022-03-08 16:34:32.000000000 +0000
@@ -53,13 +53,11 @@ const (
 	lightsailLabelSeparator           = ","
 )
 
-var (
-	// DefaultLightsailSDConfig is the default Lightsail SD configuration.
-	DefaultLightsailSDConfig = LightsailSDConfig{
-		Port:            80,
-		RefreshInterval: model.Duration(60 * time.Second),
-	}
-)
+// DefaultLightsailSDConfig is the default Lightsail SD configuration.
+var DefaultLightsailSDConfig = LightsailSDConfig{
+	Port:            80,
+	RefreshInterval: model.Duration(60 * time.Second),
+}
 
 func init() {
 	discovery.RegisterConfig(&LightsailSDConfig{})
diff -pruN 2.31.2+ds1-1/discovery/azure/azure.go 2.33.5+ds1-2/discovery/azure/azure.go
--- 2.31.2+ds1-1/discovery/azure/azure.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/azure/azure.go	2022-03-08 16:34:32.000000000 +0000
@@ -339,7 +339,6 @@ func (d *Discovery) refresh(ctx context.
 			// Get the IP address information via separate call to the network provider.
 			for _, nicID := range vm.NetworkInterfaces {
 				networkInterface, err := client.getNetworkInterfaceByID(ctx, nicID)
-
 				if err != nil {
 					level.Error(d.logger).Log("msg", "Unable to get network interface", "name", nicID, "err", err)
 					ch <- target{labelSet: nil, err: err}
@@ -362,7 +361,9 @@ func (d *Discovery) refresh(ctx context.
 
 				if *networkInterface.Primary {
 					for _, ip := range *networkInterface.IPConfigurations {
-						if ip.PublicIPAddress != nil && ip.PublicIPAddress.PublicIPAddressPropertiesFormat != nil {
+						// IPAddress is a field defined in PublicIPAddressPropertiesFormat,
+						// therefore we need to validate that both are not nil.
+						if ip.PublicIPAddress != nil && ip.PublicIPAddress.PublicIPAddressPropertiesFormat != nil && ip.PublicIPAddress.IPAddress != nil {
 							labels[azureLabelMachinePublicIP] = model.LabelValue(*ip.PublicIPAddress.IPAddress)
 						}
 						if ip.PrivateIPAddress != nil {
@@ -437,9 +438,8 @@ func (client *azureClient) getScaleSets(
 
 func (client *azureClient) getScaleSetVMs(ctx context.Context, scaleSet compute.VirtualMachineScaleSet) ([]virtualMachine, error) {
 	var vms []virtualMachine
-	//TODO do we really need to fetch the resourcegroup this way?
+	// TODO do we really need to fetch the resourcegroup this way?
 	r, err := newAzureResourceFromID(*scaleSet.ID, nil)
-
 	if err != nil {
 		return nil, errors.Wrap(err, "could not parse scale set ID")
 	}
diff -pruN 2.31.2+ds1-1/discovery/consul/consul.go 2.33.5+ds1-2/discovery/consul/consul.go
--- 2.31.2+ds1-1/discovery/consul/consul.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/consul/consul.go	2022-03-08 16:34:32.000000000 +0000
@@ -54,7 +54,7 @@ const (
 	healthLabel = model.MetaLabelPrefix + "consul_health"
 	// serviceAddressLabel is the name of the label containing the (optional) service address.
 	serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
-	//servicePortLabel is the name of the label containing the service port.
+	// servicePortLabel is the name of the label containing the service port.
 	servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
 	// datacenterLabel is the name of the label containing the datacenter ID.
 	datacenterLabel = model.MetaLabelPrefix + "consul_dc"
@@ -297,6 +297,7 @@ func (d *Discovery) getDatacenter() erro
 	}
 
 	d.clientDatacenter = dc
+	d.logger = log.With(d.logger, "datacenter", dc)
 	return nil
 }
 
@@ -530,7 +531,7 @@ func (srv *consulService) watch(ctx cont
 	for _, serviceNode := range serviceNodes {
 		// We surround the separated list with the separator as well. This way regular expressions
 		// in relabeling rules don't have to consider tag positions.
-		var tags = srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
+		tags := srv.tagSeparator + strings.Join(serviceNode.Service.Tags, srv.tagSeparator) + srv.tagSeparator
 
 		// If the service address is not empty it should be used instead of the node address
 		// since the service may be registered remotely through a different node.
diff -pruN 2.31.2+ds1-1/discovery/consul/consul_test.go 2.33.5+ds1-2/discovery/consul/consul_test.go
--- 2.31.2+ds1-1/discovery/consul/consul_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/consul/consul_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -37,9 +37,9 @@ func TestMain(m *testing.M) {
 
 func TestConfiguredService(t *testing.T) {
 	conf := &SDConfig{
-		Services: []string{"configuredServiceName"}}
+		Services: []string{"configuredServiceName"},
+	}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -57,7 +57,6 @@ func TestConfiguredServiceWithTag(t *tes
 		ServiceTags: []string{"http"},
 	}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -153,7 +152,6 @@ func TestConfiguredServiceWithTags(t *te
 
 	for _, tc := range cases {
 		consulDiscovery, err := NewDiscovery(tc.conf, nil)
-
 		if err != nil {
 			t.Errorf("Unexpected error when initializing discovery %v", err)
 		}
@@ -168,7 +166,6 @@ func TestConfiguredServiceWithTags(t *te
 func TestNonConfiguredService(t *testing.T) {
 	conf := &SDConfig{}
 	consulDiscovery, err := NewDiscovery(conf, nil)
-
 	if err != nil {
 		t.Errorf("Unexpected error when initializing discovery %v", err)
 	}
@@ -310,11 +307,15 @@ func TestNoTargets(t *testing.T) {
 
 	ctx, cancel := context.WithCancel(context.Background())
 	ch := make(chan []*targetgroup.Group)
-	go d.Run(ctx, ch)
+	go func() {
+		d.Run(ctx, ch)
+		close(ch)
+	}()
 
 	targets := (<-ch)[0].Targets
 	require.Equal(t, 0, len(targets))
 	cancel()
+	<-ch
 }
 
 // Watch only the test service.
diff -pruN 2.31.2+ds1-1/discovery/digitalocean/mock_test.go 2.33.5+ds1-2/discovery/digitalocean/mock_test.go
--- 2.31.2+ds1-1/discovery/digitalocean/mock_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/digitalocean/mock_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -75,7 +75,8 @@ func (m *SDMock) HandleDropletsList() {
 				panic(err)
 			}
 		}
-		fmt.Fprint(w, []string{`
+		fmt.Fprint(w, []string{
+			`
 {
   "droplets": [
     {
diff -pruN 2.31.2+ds1-1/discovery/file/file.go 2.33.5+ds1-2/discovery/file/file.go
--- 2.31.2+ds1-1/discovery/file/file.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/file/file.go	2022-03-08 16:34:32.000000000 +0000
@@ -25,13 +25,13 @@ import (
 	"sync"
 	"time"
 
+	"github.com/fsnotify/fsnotify"
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/pkg/errors"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
-	fsnotify "gopkg.in/fsnotify/fsnotify.v1"
 	yaml "gopkg.in/yaml.v2"
 
 	"github.com/prometheus/prometheus/discovery"
diff -pruN 2.31.2+ds1-1/discovery/file/file_test.go 2.33.5+ds1-2/discovery/file/file_test.go
--- 2.31.2+ds1-1/discovery/file/file_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/file/file_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -73,7 +73,7 @@ func (t *testRunner) copyFile(src string
 }
 
 // copyFileTo atomically copies a file with a different name to the runner's directory.
-func (t *testRunner) copyFileTo(src string, name string) string {
+func (t *testRunner) copyFileTo(src, name string) string {
 	t.Helper()
 
 	newf, err := ioutil.TempFile(t.dir, "")
@@ -95,7 +95,7 @@ func (t *testRunner) copyFileTo(src stri
 }
 
 // writeString writes atomically a string to a file.
-func (t *testRunner) writeString(file string, data string) {
+func (t *testRunner) writeString(file, data string) {
 	t.Helper()
 
 	newf, err := ioutil.TempFile(t.dir, "")
@@ -477,6 +477,7 @@ func TestRemoveFile(t *testing.T) {
 			},
 			{
 				Source: fileSource(sdFile, 1),
-			}},
+			},
+		},
 	)
 }
diff -pruN 2.31.2+ds1-1/discovery/hetzner/hcloud.go 2.33.5+ds1-2/discovery/hetzner/hcloud.go
--- 2.31.2+ds1-1/discovery/hetzner/hcloud.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/hetzner/hcloud.go	2022-03-08 16:34:32.000000000 +0000
@@ -78,6 +78,7 @@ func newHcloudDiscovery(conf *SDConfig,
 	)
 	return d, nil
 }
+
 func (d *hcloudDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	servers, err := d.client.Server.All(ctx)
 	if err != nil {
diff -pruN 2.31.2+ds1-1/discovery/hetzner/mock_test.go 2.33.5+ds1-2/discovery/hetzner/mock_test.go
--- 2.31.2+ds1-1/discovery/hetzner/mock_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/hetzner/mock_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -489,8 +489,10 @@ func (m *SDMock) HandleHcloudNetworks()
 	})
 }
 
-const robotTestUsername = "my-hetzner"
-const robotTestPassword = "my-password"
+const (
+	robotTestUsername = "my-hetzner"
+	robotTestPassword = "my-password"
+)
 
 // HandleRobotServers mocks the robot servers list endpoint.
 func (m *SDMock) HandleRobotServers() {
diff -pruN 2.31.2+ds1-1/discovery/hetzner/robot.go 2.33.5+ds1-2/discovery/hetzner/robot.go
--- 2.31.2+ds1-1/discovery/hetzner/robot.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/hetzner/robot.go	2022-03-08 16:34:32.000000000 +0000
@@ -70,6 +70,7 @@ func newRobotDiscovery(conf *SDConfig, l
 
 	return d, nil
 }
+
 func (d *robotDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
 	req, err := http.NewRequest("GET", d.endpoint+"/server", nil)
 	if err != nil {
diff -pruN 2.31.2+ds1-1/discovery/http/http_test.go 2.33.5+ds1-2/discovery/http/http_test.go
--- 2.31.2+ds1-1/discovery/http/http_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/http/http_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -24,8 +24,9 @@ import (
 	"github.com/go-kit/log"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
 func TestHTTPValidRefresh(t *testing.T) {
@@ -60,7 +61,6 @@ func TestHTTPValidRefresh(t *testing.T)
 		},
 	}
 	require.Equal(t, tgs, expectedTargets)
-
 }
 
 func TestHTTPInvalidCode(t *testing.T) {
@@ -398,5 +398,4 @@ func TestSourceDisappeared(t *testing.T)
 			require.Equal(t, test.expectedTargets[i], tgs)
 		}
 	}
-
 }
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/client_metrics.go 2.33.5+ds1-2/discovery/kubernetes/client_metrics.go
--- 2.31.2+ds1-1/discovery/kubernetes/client_metrics.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/client_metrics.go	2022-03-08 16:34:32.000000000 +0000
@@ -121,9 +121,11 @@ func (f *clientGoRequestMetricAdapter) R
 		clientGoRequestLatencyMetricVec,
 	)
 }
-func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code string, method string, host string) {
+
+func (clientGoRequestMetricAdapter) Increment(ctx context.Context, code, method, host string) {
 	clientGoRequestResultMetricVec.WithLabelValues(code).Inc()
 }
+
 func (clientGoRequestMetricAdapter) Observe(ctx context.Context, verb string, u url.URL, latency time.Duration) {
 	clientGoRequestLatencyMetricVec.WithLabelValues(u.EscapedPath()).Observe(latency.Seconds())
 }
@@ -146,21 +148,27 @@ func (f *clientGoWorkqueueMetricsProvide
 func (f *clientGoWorkqueueMetricsProvider) NewDepthMetric(name string) workqueue.GaugeMetric {
 	return clientGoWorkqueueDepthMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewAddsMetric(name string) workqueue.CounterMetric {
 	return clientGoWorkqueueAddsMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewLatencyMetric(name string) workqueue.HistogramMetric {
 	return clientGoWorkqueueLatencyMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewWorkDurationMetric(name string) workqueue.HistogramMetric {
 	return clientGoWorkqueueWorkDurationMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewUnfinishedWorkSecondsMetric(name string) workqueue.SettableGaugeMetric {
 	return clientGoWorkqueueUnfinishedWorkSecondsMetricVec.WithLabelValues(name)
 }
+
 func (f *clientGoWorkqueueMetricsProvider) NewLongestRunningProcessorSecondsMetric(name string) workqueue.SettableGaugeMetric {
 	return clientGoWorkqueueLongestRunningProcessorMetricVec.WithLabelValues(name)
 }
+
 func (clientGoWorkqueueMetricsProvider) NewRetriesMetric(name string) workqueue.CounterMetric {
 	// Retries are not used so the metric is omitted.
 	return noopMetric{}
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/endpoints.go 2.33.5+ds1-2/discovery/kubernetes/endpoints.go
--- 2.31.2+ds1-1/discovery/kubernetes/endpoints.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/endpoints.go	2022-03-08 16:34:32.000000000 +0000
@@ -18,8 +18,6 @@ import (
 	"net"
 	"strconv"
 
-	"github.com/prometheus/prometheus/util/strutil"
-
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/pkg/errors"
@@ -29,6 +27,7 @@ import (
 	"k8s.io/client-go/util/workqueue"
 
 	"github.com/prometheus/prometheus/discovery/targetgroup"
+	"github.com/prometheus/prometheus/util/strutil"
 )
 
 var (
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/endpointslice_test.go 2.33.5+ds1-2/discovery/kubernetes/endpointslice_test.go
--- 2.31.2+ds1-1/discovery/kubernetes/endpointslice_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/endpointslice_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -630,3 +630,87 @@ func TestEndpointSliceDiscoveryNamespace
 		},
 	}.Run(t)
 }
+
+func TestEndpointSliceDiscoveryOwnNamespace(t *testing.T) {
+	epOne := makeEndpointSlice()
+	epOne.Namespace = "own-ns"
+
+	epTwo := makeEndpointSlice()
+	epTwo.Namespace = "non-own-ns"
+
+	podOne := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testpod",
+			Namespace: "own-ns",
+			UID:       types.UID("deadbeef"),
+		},
+		Spec: v1.PodSpec{
+			NodeName: "testnode",
+			Containers: []v1.Container{
+				{
+					Name: "p1",
+					Ports: []v1.ContainerPort{
+						{
+							Name:          "mainport",
+							ContainerPort: 9000,
+							Protocol:      v1.ProtocolTCP,
+						},
+					},
+				},
+			},
+		},
+		Status: v1.PodStatus{
+			HostIP: "2.3.4.5",
+			PodIP:  "4.3.2.1",
+		},
+	}
+
+	podTwo := podOne.DeepCopy()
+	podTwo.Namespace = "non-own-ns"
+
+	objs := []runtime.Object{
+		epOne,
+		epTwo,
+		podOne,
+		podTwo,
+	}
+	n, _ := makeDiscovery(RoleEndpointSlice, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...)
+
+	k8sDiscoveryTest{
+		discovery:        n,
+		expectedMaxItems: 1,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpointslice/own-ns/testendpoints": {
+				Targets: []model.LabelSet{
+					{
+						"__address__": "1.2.3.4:9000",
+						"__meta_kubernetes_endpointslice_endpoint_hostname": "testendpoint1",
+						"__meta_kubernetes_endpointslice_port":              "9000",
+						"__meta_kubernetes_endpointslice_port_name":         "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":     "TCP",
+					},
+					{
+						"__address__": "2.3.4.5:9000",
+						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "true",
+						"__meta_kubernetes_endpointslice_port":                      "9000",
+						"__meta_kubernetes_endpointslice_port_name":                 "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":             "TCP",
+					},
+					{
+						"__address__": "3.4.5.6:9000",
+						"__meta_kubernetes_endpointslice_endpoint_conditions_ready": "false",
+						"__meta_kubernetes_endpointslice_port":                      "9000",
+						"__meta_kubernetes_endpointslice_port_name":                 "testport",
+						"__meta_kubernetes_endpointslice_port_protocol":             "TCP",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_endpointslice_address_type": "IPv4",
+					"__meta_kubernetes_endpointslice_name":         "testendpoints",
+					"__meta_kubernetes_namespace":                  "own-ns",
+				},
+				Source: "endpointslice/own-ns/testendpoints",
+			},
+		},
+	}.Run(t)
+}
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/endpoints_test.go 2.33.5+ds1-2/discovery/kubernetes/endpoints_test.go
--- 2.31.2+ds1-1/discovery/kubernetes/endpoints_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/endpoints_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -27,7 +27,7 @@ import (
 )
 
 func makeEndpoints() *v1.Endpoints {
-	var nodeName = "foobar"
+	nodeName := "foobar"
 	return &v1.Endpoints{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:      "testendpoints",
@@ -614,4 +614,87 @@ func TestEndpointsDiscoveryNamespaces(t
 			},
 		},
 	}.Run(t)
+}
+
+func TestEndpointsDiscoveryOwnNamespace(t *testing.T) {
+	epOne := makeEndpoints()
+	epOne.Namespace = "own-ns"
+
+	epTwo := makeEndpoints()
+	epTwo.Namespace = "non-own-ns"
+
+	podOne := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      "testpod",
+			Namespace: "own-ns",
+			UID:       types.UID("deadbeef"),
+		},
+		Spec: v1.PodSpec{
+			NodeName: "testnode",
+			Containers: []v1.Container{
+				{
+					Name: "p1",
+					Ports: []v1.ContainerPort{
+						{
+							Name:          "mainport",
+							ContainerPort: 9000,
+							Protocol:      v1.ProtocolTCP,
+						},
+					},
+				},
+			},
+		},
+		Status: v1.PodStatus{
+			HostIP: "2.3.4.5",
+			PodIP:  "4.3.2.1",
+		},
+	}
+
+	podTwo := podOne.DeepCopy()
+	podTwo.Namespace = "non-own-ns"
+
+	objs := []runtime.Object{
+		epOne,
+		epTwo,
+		podOne,
+		podTwo,
+	}
+
+	n, _ := makeDiscovery(RoleEndpoint, NamespaceDiscovery{IncludeOwnNamespace: true}, objs...)
+
+	k8sDiscoveryTest{
+		discovery:        n,
+		expectedMaxItems: 1,
+		expectedRes: map[string]*targetgroup.Group{
+			"endpoints/own-ns/testendpoints": {
+				Targets: []model.LabelSet{
+					{
+						"__address__":                              "1.2.3.4:9000",
+						"__meta_kubernetes_endpoint_hostname":      "testendpoint1",
+						"__meta_kubernetes_endpoint_node_name":     "foobar",
+						"__meta_kubernetes_endpoint_port_name":     "testport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready":         "true",
+					},
+					{
+						"__address__":                              "2.3.4.5:9001",
+						"__meta_kubernetes_endpoint_port_name":     "testport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready":         "true",
+					},
+					{
+						"__address__":                              "2.3.4.5:9001",
+						"__meta_kubernetes_endpoint_port_name":     "testport",
+						"__meta_kubernetes_endpoint_port_protocol": "TCP",
+						"__meta_kubernetes_endpoint_ready":         "false",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_namespace":      "own-ns",
+					"__meta_kubernetes_endpoints_name": "testendpoints",
+				},
+				Source: "endpoints/own-ns/testendpoints",
+			},
+		},
+	}.Run(t)
 }
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/ingress_test.go 2.33.5+ds1-2/discovery/kubernetes/ingress_test.go
--- 2.31.2+ds1-1/discovery/kubernetes/ingress_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/ingress_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -323,3 +323,21 @@ func TestIngressDiscoveryNamespacesV1bet
 		expectedRes:      expected,
 	}.Run(t)
 }
+
+func TestIngressDiscoveryOwnNamespace(t *testing.T) {
+	n, c := makeDiscovery(RoleIngress, NamespaceDiscovery{IncludeOwnNamespace: true})
+
+	expected := expectedTargetGroups("own-ns", TLSNo)
+	k8sDiscoveryTest{
+		discovery: n,
+		afterStart: func() {
+			for _, ns := range []string{"own-ns", "non-own-ns"} {
+				obj := makeIngress(TLSNo)
+				obj.Namespace = ns
+				c.NetworkingV1().Ingresses(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
+			}
+		},
+		expectedMaxItems: 1,
+		expectedRes:      expected,
+	}.Run(t)
+}
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/kubernetes.go 2.33.5+ds1-2/discovery/kubernetes/kubernetes.go
--- 2.31.2+ds1-1/discovery/kubernetes/kubernetes.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/kubernetes.go	2022-03-08 16:34:32.000000000 +0000
@@ -16,6 +16,7 @@ package kubernetes
 import (
 	"context"
 	"fmt"
+	"io/ioutil"
 	"reflect"
 	"strings"
 	"sync"
@@ -48,7 +49,7 @@ import (
 )
 
 const (
-	// kubernetesMetaLabelPrefix is the meta prefix used for all meta labels.
+	// metaLabelPrefix is the meta prefix used for all meta labels.
 	// in this discovery.
 	metaLabelPrefix  = model.MetaLabelPrefix + "kubernetes_"
 	namespaceLabel   = metaLabelPrefix + "namespace"
@@ -183,6 +184,12 @@ func (c *SDConfig) UnmarshalYAML(unmarsh
 	if c.APIServer.URL == nil && !reflect.DeepEqual(c.HTTPClientConfig, config.DefaultHTTPClientConfig) {
 		return errors.Errorf("to use custom HTTP client configuration please provide the 'api_server' URL explicitly")
 	}
+	if c.APIServer.URL != nil && c.NamespaceDiscovery.IncludeOwnNamespace {
+		return errors.Errorf("cannot use 'api_server' and 'namespaces.own_namespace' simultaneously")
+	}
+	if c.KubeConfig != "" && c.NamespaceDiscovery.IncludeOwnNamespace {
+		return errors.Errorf("cannot use 'kubeconfig_file' and 'namespaces.own_namespace' simultaneously")
+	}
 
 	foundSelectorRoles := make(map[Role]struct{})
 	allowedSelectors := map[Role][]string{
@@ -230,7 +237,8 @@ func (c *SDConfig) UnmarshalYAML(unmarsh
 // NamespaceDiscovery is the configuration for discovering
 // Kubernetes namespaces.
 type NamespaceDiscovery struct {
-	Names []string `yaml:"names"`
+	IncludeOwnNamespace bool     `yaml:"own_namespace"`
+	Names               []string `yaml:"names"`
 }
 
 // UnmarshalYAML implements the yaml.Unmarshaler interface.
@@ -250,13 +258,21 @@ type Discovery struct {
 	namespaceDiscovery *NamespaceDiscovery
 	discoverers        []discovery.Discoverer
 	selectors          roleSelector
+	ownNamespace       string
 }
 
 func (d *Discovery) getNamespaces() []string {
 	namespaces := d.namespaceDiscovery.Names
-	if len(namespaces) == 0 {
-		namespaces = []string{apiv1.NamespaceAll}
+	includeOwnNamespace := d.namespaceDiscovery.IncludeOwnNamespace
+
+	if len(namespaces) == 0 && !includeOwnNamespace {
+		return []string{apiv1.NamespaceAll}
 	}
+
+	if includeOwnNamespace {
+		return append(namespaces, d.ownNamespace)
+	}
+
 	return namespaces
 }
 
@@ -266,8 +282,9 @@ func New(l log.Logger, conf *SDConfig) (
 		l = log.NewNopLogger()
 	}
 	var (
-		kcfg *rest.Config
-		err  error
+		kcfg         *rest.Config
+		err          error
+		ownNamespace string
 	)
 	if conf.KubeConfig != "" {
 		kcfg, err = clientcmd.BuildConfigFromFlags("", conf.KubeConfig)
@@ -281,6 +298,18 @@ func New(l log.Logger, conf *SDConfig) (
 		if err != nil {
 			return nil, err
 		}
+
+		if conf.NamespaceDiscovery.IncludeOwnNamespace {
+			ownNamespaceContents, err := ioutil.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace")
+			if err != nil {
+				return nil, fmt.Errorf("could not determine the pod's namespace: %w", err)
+			}
+			if len(ownNamespaceContents) == 0 {
+				return nil, errors.New("could not read own namespace name (empty file)")
+			}
+			ownNamespace = string(ownNamespaceContents)
+		}
+
 		level.Info(l).Log("msg", "Using pod service account via in-cluster config")
 	} else {
 		rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "kubernetes_sd")
@@ -299,6 +328,7 @@ func New(l log.Logger, conf *SDConfig) (
 	if err != nil {
 		return nil, err
 	}
+
 	return &Discovery{
 		client:             c,
 		logger:             l,
@@ -306,6 +336,7 @@ func New(l log.Logger, conf *SDConfig) (
 		namespaceDiscovery: &conf.NamespaceDiscovery,
 		discoverers:        make([]discovery.Discoverer, 0),
 		selectors:          mapSelector(conf.Selectors),
+		ownNamespace:       ownNamespace,
 	}, nil
 }
 
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/kubernetes_test.go 2.33.5+ds1-2/discovery/kubernetes/kubernetes_test.go
--- 2.31.2+ds1-1/discovery/kubernetes/kubernetes_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/kubernetes_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -54,6 +54,7 @@ func makeDiscoveryWithVersion(role Role,
 		logger:             log.NewNopLogger(),
 		role:               role,
 		namespaceDiscovery: &nsDiscovery,
+		ownNamespace:       "own-ns",
 	}, clientset
 }
 
@@ -86,15 +87,18 @@ func (d k8sDiscoveryTest) Run(t *testing
 	// Ensure that discovery has a discoverer set. This prevents a race
 	// condition where the above go routine may or may not have set a
 	// discoverer yet.
+	lastDiscoverersCount := 0
+	dis := d.discovery.(*Discovery)
 	for {
-		dis := d.discovery.(*Discovery)
 		dis.RLock()
 		l := len(dis.discoverers)
 		dis.RUnlock()
-		if l > 0 {
+		if l > 0 && l == lastDiscoverersCount {
 			break
 		}
-		time.Sleep(10 * time.Millisecond)
+		time.Sleep(100 * time.Millisecond)
+
+		lastDiscoverersCount = l
 	}
 
 	resChan := make(chan map[string]*targetgroup.Group)
@@ -171,13 +175,15 @@ type hasSynced interface {
 	hasSynced() bool
 }
 
-var _ hasSynced = &Discovery{}
-var _ hasSynced = &Node{}
-var _ hasSynced = &Endpoints{}
-var _ hasSynced = &EndpointSlice{}
-var _ hasSynced = &Ingress{}
-var _ hasSynced = &Pod{}
-var _ hasSynced = &Service{}
+var (
+	_ hasSynced = &Discovery{}
+	_ hasSynced = &Node{}
+	_ hasSynced = &Endpoints{}
+	_ hasSynced = &EndpointSlice{}
+	_ hasSynced = &Ingress{}
+	_ hasSynced = &Pod{}
+	_ hasSynced = &Service{}
+)
 
 func (d *Discovery) hasSynced() bool {
 	d.RLock()
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/node.go 2.33.5+ds1-2/discovery/kubernetes/node.go
--- 2.31.2+ds1-1/discovery/kubernetes/node.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/node.go	2022-03-08 16:34:32.000000000 +0000
@@ -149,6 +149,7 @@ func nodeSourceFromName(name string) str
 
 const (
 	nodeNameLabel               = metaLabelPrefix + "node_name"
+	nodeProviderIDLabel         = metaLabelPrefix + "node_provider_id"
 	nodeLabelPrefix             = metaLabelPrefix + "node_label_"
 	nodeLabelPresentPrefix      = metaLabelPrefix + "node_labelpresent_"
 	nodeAnnotationPrefix        = metaLabelPrefix + "node_annotation_"
@@ -161,6 +162,7 @@ func nodeLabels(n *apiv1.Node) model.Lab
 	ls := make(model.LabelSet, 2*(len(n.Labels)+len(n.Annotations))+1)
 
 	ls[nodeNameLabel] = lv(n.Name)
+	ls[nodeProviderIDLabel] = lv(n.Spec.ProviderID)
 
 	for k, v := range n.Labels {
 		ln := strutil.SanitizeLabelName(k)
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/node_test.go 2.33.5+ds1-2/discovery/kubernetes/node_test.go
--- 2.31.2+ds1-1/discovery/kubernetes/node_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/node_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -25,13 +25,16 @@ import (
 	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
-func makeNode(name, address string, labels map[string]string, annotations map[string]string) *v1.Node {
+func makeNode(name, address, providerID string, labels, annotations map[string]string) *v1.Node {
 	return &v1.Node{
 		ObjectMeta: metav1.ObjectMeta{
 			Name:        name,
 			Labels:      labels,
 			Annotations: annotations,
 		},
+		Spec: v1.NodeSpec{
+			ProviderID: providerID,
+		},
 		Status: v1.NodeStatus{
 			Addresses: []v1.NodeAddress{
 				{
@@ -49,7 +52,7 @@ func makeNode(name, address string, labe
 }
 
 func makeEnumeratedNode(i int) *v1.Node {
-	return makeNode(fmt.Sprintf("test%d", i), "1.2.3.4", map[string]string{}, map[string]string{})
+	return makeNode(fmt.Sprintf("test%d", i), "1.2.3.4", fmt.Sprintf("aws:///de-west-3a/i-%d", i), map[string]string{}, map[string]string{})
 }
 
 func TestNodeDiscoveryBeforeStart(t *testing.T) {
@@ -61,6 +64,7 @@ func TestNodeDiscoveryBeforeStart(t *tes
 			obj := makeNode(
 				"test",
 				"1.2.3.4",
+				"aws:///nl-north-7b/i-03149834983492827",
 				map[string]string{"test-label": "testvalue"},
 				map[string]string{"test-annotation": "testannotationvalue"},
 			)
@@ -78,6 +82,7 @@ func TestNodeDiscoveryBeforeStart(t *tes
 				},
 				Labels: model.LabelSet{
 					"__meta_kubernetes_node_name":                              "test",
+					"__meta_kubernetes_node_provider_id":                       "aws:///nl-north-7b/i-03149834983492827",
 					"__meta_kubernetes_node_label_test_label":                  "testvalue",
 					"__meta_kubernetes_node_labelpresent_test_label":           "true",
 					"__meta_kubernetes_node_annotation_test_annotation":        "testannotationvalue",
@@ -109,7 +114,8 @@ func TestNodeDiscoveryAdd(t *testing.T)
 					},
 				},
 				Labels: model.LabelSet{
-					"__meta_kubernetes_node_name": "test1",
+					"__meta_kubernetes_node_name":        "test1",
+					"__meta_kubernetes_node_provider_id": "aws:///de-west-3a/i-1",
 				},
 				Source: "node/test1",
 			},
@@ -146,6 +152,7 @@ func TestNodeDiscoveryUpdate(t *testing.
 			obj2 := makeNode(
 				"test0",
 				"1.2.3.4",
+				"aws:///fr-south-1c/i-49508290343823952",
 				map[string]string{"Unschedulable": "true"},
 				map[string]string{},
 			)
@@ -165,6 +172,7 @@ func TestNodeDiscoveryUpdate(t *testing.
 					"__meta_kubernetes_node_label_Unschedulable":        "true",
 					"__meta_kubernetes_node_labelpresent_Unschedulable": "true",
 					"__meta_kubernetes_node_name":                       "test0",
+					"__meta_kubernetes_node_provider_id":                "aws:///fr-south-1c/i-49508290343823952",
 				},
 				Source: "node/test0",
 			},
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/pod_test.go 2.33.5+ds1-2/discovery/kubernetes/pod_test.go
--- 2.31.2+ds1-1/discovery/kubernetes/pod_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/pod_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -389,3 +389,21 @@ func TestPodDiscoveryNamespaces(t *testi
 		expectedRes:      expected,
 	}.Run(t)
 }
+
+func TestPodDiscoveryOwnNamespace(t *testing.T) {
+	n, c := makeDiscovery(RolePod, NamespaceDiscovery{IncludeOwnNamespace: true})
+
+	expected := expectedPodTargetGroups("own-ns")
+	k8sDiscoveryTest{
+		discovery: n,
+		beforeRun: func() {
+			for _, ns := range []string{"own-ns", "non-own-ns"} {
+				pod := makePods()
+				pod.Namespace = ns
+				c.CoreV1().Pods(pod.Namespace).Create(context.Background(), pod, metav1.CreateOptions{})
+			}
+		},
+		expectedMaxItems: 1,
+		expectedRes:      expected,
+	}.Run(t)
+}
diff -pruN 2.31.2+ds1-1/discovery/kubernetes/service_test.go 2.33.5+ds1-2/discovery/kubernetes/service_test.go
--- 2.31.2+ds1-1/discovery/kubernetes/service_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/kubernetes/service_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -254,3 +254,87 @@ func TestServiceDiscoveryNamespaces(t *t
 		},
 	}.Run(t)
 }
+
+func TestServiceDiscoveryOwnNamespace(t *testing.T) {
+	n, c := makeDiscovery(RoleService, NamespaceDiscovery{IncludeOwnNamespace: true})
+
+	k8sDiscoveryTest{
+		discovery: n,
+		afterStart: func() {
+			for _, ns := range []string{"own-ns", "non-own-ns"} {
+				obj := makeService()
+				obj.Namespace = ns
+				c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
+			}
+		},
+		expectedMaxItems: 1,
+		expectedRes: map[string]*targetgroup.Group{
+			"svc/own-ns/testservice": {
+				Targets: []model.LabelSet{
+					{
+						"__meta_kubernetes_service_port_protocol": "TCP",
+						"__address__":                          "testservice.own-ns.svc:30900",
+						"__meta_kubernetes_service_type":       "ClusterIP",
+						"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
+						"__meta_kubernetes_service_port_name":  "testport",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_service_name": "testservice",
+					"__meta_kubernetes_namespace":    "own-ns",
+				},
+				Source: "svc/own-ns/testservice",
+			},
+		},
+	}.Run(t)
+}
+
+func TestServiceDiscoveryAllNamespaces(t *testing.T) {
+	n, c := makeDiscovery(RoleService, NamespaceDiscovery{})
+
+	k8sDiscoveryTest{
+		discovery: n,
+		afterStart: func() {
+			for _, ns := range []string{"own-ns", "non-own-ns"} {
+				obj := makeService()
+				obj.Namespace = ns
+				c.CoreV1().Services(obj.Namespace).Create(context.Background(), obj, metav1.CreateOptions{})
+			}
+		},
+		expectedMaxItems: 2,
+		expectedRes: map[string]*targetgroup.Group{
+			"svc/own-ns/testservice": {
+				Targets: []model.LabelSet{
+					{
+						"__meta_kubernetes_service_port_protocol": "TCP",
+						"__address__":                          "testservice.own-ns.svc:30900",
+						"__meta_kubernetes_service_type":       "ClusterIP",
+						"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
+						"__meta_kubernetes_service_port_name":  "testport",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_service_name": "testservice",
+					"__meta_kubernetes_namespace":    "own-ns",
+				},
+				Source: "svc/own-ns/testservice",
+			},
+			"svc/non-own-ns/testservice": {
+				Targets: []model.LabelSet{
+					{
+						"__meta_kubernetes_service_port_protocol": "TCP",
+						"__address__":                          "testservice.non-own-ns.svc:30900",
+						"__meta_kubernetes_service_type":       "ClusterIP",
+						"__meta_kubernetes_service_cluster_ip": "10.0.0.1",
+						"__meta_kubernetes_service_port_name":  "testport",
+					},
+				},
+				Labels: model.LabelSet{
+					"__meta_kubernetes_service_name": "testservice",
+					"__meta_kubernetes_namespace":    "non-own-ns",
+				},
+				Source: "svc/non-own-ns/testservice",
+			},
+		},
+	}.Run(t)
+}
diff -pruN 2.31.2+ds1-1/discovery/legacymanager/manager_test.go 2.33.5+ds1-2/discovery/legacymanager/manager_test.go
--- 2.31.2+ds1-1/discovery/legacymanager/manager_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/legacymanager/manager_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -37,7 +37,6 @@ func TestMain(m *testing.M) {
 
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
-
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
 	// Final targets array is ordered alphabetically by the name of the discoverer.
 	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
@@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T
 							{
 								Source:  "tp1_group2",
 								Targets: []model.LabelSet{{"__instance__": "2"}},
-							}},
+							},
+						},
 					},
 				},
 			},
@@ -668,13 +668,16 @@ func TestTargetUpdatesOrder(t *testing.T
 			discoveryManager.updatert = 100 * time.Millisecond
 
 			var totalUpdatesCount int
-			provUpdates := make(chan []*targetgroup.Group)
 			for _, up := range tc.updates {
-				go newMockDiscoveryProvider(up...).Run(ctx, provUpdates)
 				if len(up) > 0 {
 					totalUpdatesCount += len(up)
 				}
 			}
+			provUpdates := make(chan []*targetgroup.Group, totalUpdatesCount)
+
+			for _, up := range tc.updates {
+				go newMockDiscoveryProvider(up...).Run(ctx, provUpdates)
+			}
 
 			for x := 0; x < totalUpdatesCount; x++ {
 				select {
@@ -729,14 +732,12 @@ func verifyPresence(t *testing.T, tSets
 	match := false
 	var mergedTargets string
 	for _, targetGroup := range tSets[poolKey] {
-
 		for _, l := range targetGroup.Targets {
 			mergedTargets = mergedTargets + " " + l.String()
 			if l.String() == label {
 				match = true
 			}
 		}
-
 	}
 	if match != present {
 		msg := ""
@@ -926,7 +927,6 @@ func TestGaugeFailedConfigs(t *testing.T
 	if failedCount != 0 {
 		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
 	}
-
 }
 
 func TestCoordinationWithReceiver(t *testing.T) {
diff -pruN 2.31.2+ds1-1/discovery/linode/linode.go 2.33.5+ds1-2/discovery/linode/linode.go
--- 2.31.2+ds1-1/discovery/linode/linode.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/linode/linode.go	2022-03-08 16:34:32.000000000 +0000
@@ -161,8 +161,12 @@ func (d *Discovery) refresh(ctx context.
 
 	if d.lastResults != nil && d.eventPollingEnabled {
 		// Check to see if there have been any events. If so, refresh our data.
-		opts := linodego.NewListOptions(1, fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")))
-		events, err := d.client.ListEvents(ctx, opts)
+		opts := linodego.ListOptions{
+			PageOptions: &linodego.PageOptions{Page: 1},
+			PageSize:    25,
+			Filter:      fmt.Sprintf(filterTemplate, d.lastRefreshTimestamp.Format("2006-01-02T15:04:05")),
+		}
+		events, err := d.client.ListEvents(ctx, &opts)
 		if err != nil {
 			var e *linodego.Error
 			if errors.As(err, &e) && e.Code == http.StatusUnauthorized {
@@ -205,13 +209,13 @@ func (d *Discovery) refreshData(ctx cont
 	}
 
 	// Gather all linode instances.
-	instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{})
+	instances, err := d.client.ListInstances(ctx, &linodego.ListOptions{PageSize: 500})
 	if err != nil {
 		return nil, err
 	}
 
 	// Gather detailed IP address info for all IPs on all linode instances.
-	detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{})
+	detailedIPs, err := d.client.ListIPAddresses(ctx, &linodego.ListOptions{PageSize: 500})
 	if err != nil {
 		return nil, err
 	}
diff -pruN 2.31.2+ds1-1/discovery/linode/linode_test.go 2.33.5+ds1-2/discovery/linode/linode_test.go
--- 2.31.2+ds1-1/discovery/linode/linode_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/linode/linode_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -56,7 +56,7 @@ func TestLinodeSDRefresh(t *testing.T) {
 	require.NoError(t, err)
 	endpoint, err := url.Parse(sdmock.Mock.Endpoint())
 	require.NoError(t, err)
-	d.client.SetBaseURL(fmt.Sprintf("%s/v4", endpoint.String()))
+	d.client.SetBaseURL(endpoint.String())
 
 	tgs, err := d.refresh(context.Background())
 	require.NoError(t, err)
diff -pruN 2.31.2+ds1-1/discovery/manager_test.go 2.33.5+ds1-2/discovery/manager_test.go
--- 2.31.2+ds1-1/discovery/manager_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/manager_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -37,7 +37,6 @@ func TestMain(m *testing.M) {
 
 // TestTargetUpdatesOrder checks that the target updates are received in the expected order.
 func TestTargetUpdatesOrder(t *testing.T) {
-
 	// The order by which the updates are send is determined by the interval passed to the mock discovery adapter
 	// Final targets array is ordered alphabetically by the name of the discoverer.
 	// For example discoverer "A" with targets "t2,t3" and discoverer "B" with targets "t1,t2" will result in "t2,t3,t1,t2" after the merge.
@@ -117,7 +116,8 @@ func TestTargetUpdatesOrder(t *testing.T
 							{
 								Source:  "tp1_group2",
 								Targets: []model.LabelSet{{"__instance__": "2"}},
-							}},
+							},
+						},
 					},
 				},
 			},
@@ -668,13 +668,16 @@ func TestTargetUpdatesOrder(t *testing.T
 			discoveryManager.updatert = 100 * time.Millisecond
 
 			var totalUpdatesCount int
-			provUpdates := make(chan []*targetgroup.Group)
 			for _, up := range tc.updates {
-				go newMockDiscoveryProvider(up...).Run(ctx, provUpdates)
 				if len(up) > 0 {
 					totalUpdatesCount += len(up)
 				}
 			}
+			provUpdates := make(chan []*targetgroup.Group, totalUpdatesCount)
+
+			for _, up := range tc.updates {
+				go newMockDiscoveryProvider(up...).Run(ctx, provUpdates)
+			}
 
 			for x := 0; x < totalUpdatesCount; x++ {
 				select {
@@ -719,7 +722,7 @@ func staticConfig(addrs ...string) Stati
 	return cfg
 }
 
-func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key string, label string, present bool) {
+func verifySyncedPresence(t *testing.T, tGroups map[string][]*targetgroup.Group, key, label string, present bool) {
 	t.Helper()
 	if _, ok := tGroups[key]; !ok {
 		t.Fatalf("'%s' should be present in Group map keys: %v", key, tGroups)
@@ -734,7 +737,6 @@ func verifySyncedPresence(t *testing.T,
 				match = true
 			}
 		}
-
 	}
 	if match != present {
 		msg := ""
@@ -755,14 +757,12 @@ func verifyPresence(t *testing.T, tSets
 	match := false
 	var mergedTargets string
 	for _, targetGroup := range tSets[poolKey] {
-
 		for _, l := range targetGroup.Targets {
 			mergedTargets = mergedTargets + " " + l.String()
 			if l.String() == label {
 				match = true
 			}
 		}
-
 	}
 	if match != present {
 		msg := ""
@@ -1062,7 +1062,6 @@ func TestTargetSetRecreatesEmptyStaticCo
 	if lbls := syncedTargets["prometheus"][0].Labels; lbls != nil {
 		t.Fatalf("Unexpected Group: expected nil Labels, got %v", lbls)
 	}
-
 }
 
 func TestIdenticalConfigurationsAreCoalesced(t *testing.T) {
@@ -1179,7 +1178,6 @@ func TestGaugeFailedConfigs(t *testing.T
 	if failedCount != 0 {
 		t.Fatalf("Expected to get no failed config, got: %v", failedCount)
 	}
-
 }
 
 func TestCoordinationWithReceiver(t *testing.T) {
@@ -1371,7 +1369,11 @@ func (tp mockdiscoveryProvider) Run(ctx
 		for i := range u.targetGroups {
 			tgs[i] = &u.targetGroups[i]
 		}
-		upCh <- tgs
+		select {
+		case <-ctx.Done():
+			return
+		case upCh <- tgs:
+		}
 	}
 	<-ctx.Done()
 }
diff -pruN 2.31.2+ds1-1/discovery/marathon/marathon.go 2.33.5+ds1-2/discovery/marathon/marathon.go
--- 2.31.2+ds1-1/discovery/marathon/marathon.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/marathon/marathon.go	2022-03-08 16:34:32.000000000 +0000
@@ -478,7 +478,6 @@ func targetsForApp(app *app) []model.Lab
 
 // Generate a target endpoint string in host:port format.
 func targetEndpoint(task *task, port uint32, containerNet bool) string {
-
 	var host string
 
 	// Use the task's ipAddress field when it's in a container network
@@ -493,7 +492,6 @@ func targetEndpoint(task *task, port uin
 
 // Get a list of ports and a list of labels from a PortMapping.
 func extractPortMapping(portMappings []portMapping, containerNet bool) ([]uint32, []map[string]string) {
-
 	ports := make([]uint32, len(portMappings))
 	labels := make([]map[string]string, len(portMappings))
 
diff -pruN 2.31.2+ds1-1/discovery/marathon/marathon_test.go 2.33.5+ds1-2/discovery/marathon/marathon_test.go
--- 2.31.2+ds1-1/discovery/marathon/marathon_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/marathon/marathon_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -29,11 +29,14 @@ import (
 var (
 	marathonValidLabel = map[string]string{"prometheus": "yes"}
 	testServers        = []string{"http://localhost:8080"}
-	conf               = SDConfig{Servers: testServers}
 )
 
+func testConfig() SDConfig {
+	return SDConfig{Servers: testServers}
+}
+
 func testUpdateServices(client appListClient) ([]*targetgroup.Group, error) {
-	md, err := NewDiscovery(conf, nil)
+	md, err := NewDiscovery(testConfig(), nil)
 	if err != nil {
 		return nil, err
 	}
@@ -60,9 +63,7 @@ func TestMarathonSDHandleError(t *testin
 }
 
 func TestMarathonSDEmptyList(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) { return &appList{}, nil }
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -99,11 +100,9 @@ func marathonTestAppList(labels map[stri
 }
 
 func TestMarathonSDSendGroup(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppList(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppList(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -130,7 +129,7 @@ func TestMarathonSDSendGroup(t *testing.
 }
 
 func TestMarathonSDRemoveApp(t *testing.T) {
-	md, err := NewDiscovery(conf, nil)
+	md, err := NewDiscovery(testConfig(), nil)
 	if err != nil {
 		t.Fatalf("%s", err)
 	}
@@ -195,11 +194,9 @@ func marathonTestAppListWithMultiplePort
 }
 
 func TestMarathonSDSendGroupWithMultiplePort(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithMultiplePorts(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -254,11 +251,9 @@ func marathonTestZeroTaskPortAppList(lab
 }
 
 func TestMarathonZeroTaskPorts(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestZeroTaskPortAppList(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -286,13 +281,6 @@ func Test500ErrorHttpResponseWithValidJS
 	// Create a test server with mock HTTP handler.
 	ts := httptest.NewServer(http.HandlerFunc(respHandler))
 	defer ts.Close()
-	// Backup conf for future tests.
-	backupConf := conf
-	defer func() {
-		conf = backupConf
-	}()
-	// Setup conf for the test case.
-	conf = SDConfig{Servers: []string{ts.URL}}
 	// Execute test case and validate behavior.
 	_, err := testUpdateServices(nil)
 	if err == nil {
@@ -331,11 +319,9 @@ func marathonTestAppListWithPortDefiniti
 }
 
 func TestMarathonSDSendGroupWithPortDefinitions(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithPortDefinitions(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -403,11 +389,9 @@ func marathonTestAppListWithPortDefiniti
 }
 
 func TestMarathonSDSendGroupWithPortDefinitionsRequirePorts(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithPortDefinitionsRequirePorts(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -470,11 +454,9 @@ func marathonTestAppListWithPorts(labels
 }
 
 func TestMarathonSDSendGroupWithPorts(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithPorts(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -546,11 +528,9 @@ func marathonTestAppListWithContainerPor
 }
 
 func TestMarathonSDSendGroupWithContainerPortMappings(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithContainerPortMappings(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -622,11 +602,9 @@ func marathonTestAppListWithDockerContai
 }
 
 func TestMarathonSDSendGroupWithDockerContainerPortMappings(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithDockerContainerPortMappings(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
@@ -702,11 +680,9 @@ func marathonTestAppListWithContainerNet
 }
 
 func TestMarathonSDSendGroupWithContainerNetworkAndPortMapping(t *testing.T) {
-	var (
-		client = func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
-			return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
-		}
-	)
+	client := func(_ context.Context, _ *http.Client, _ string) (*appList, error) {
+		return marathonTestAppListWithContainerNetworkAndPortMappings(marathonValidLabel, 1), nil
+	}
 	tgs, err := testUpdateServices(client)
 	if err != nil {
 		t.Fatalf("Got error: %s", err)
diff -pruN 2.31.2+ds1-1/discovery/moby/docker.go 2.33.5+ds1-2/discovery/moby/docker.go
--- 2.31.2+ds1-1/discovery/moby/docker.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/moby/docker.go	2022-03-08 16:34:32.000000000 +0000
@@ -28,6 +28,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
diff -pruN 2.31.2+ds1-1/discovery/moby/network.go 2.33.5+ds1-2/discovery/moby/network.go
--- 2.31.2+ds1-1/discovery/moby/network.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/moby/network.go	2022-03-08 16:34:32.000000000 +0000
@@ -19,6 +19,7 @@ import (
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
+
 	"github.com/prometheus/prometheus/util/strutil"
 )
 
diff -pruN 2.31.2+ds1-1/discovery/openstack/hypervisor.go 2.33.5+ds1-2/discovery/openstack/hypervisor.go
--- 2.31.2+ds1-1/discovery/openstack/hypervisor.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/openstack/hypervisor.go	2022-03-08 16:34:32.000000000 +0000
@@ -51,8 +51,10 @@ type HypervisorDiscovery struct {
 // newHypervisorDiscovery returns a new hypervisor discovery.
 func newHypervisorDiscovery(provider *gophercloud.ProviderClient, opts *gophercloud.AuthOptions,
 	port int, region string, availability gophercloud.Availability, l log.Logger) *HypervisorDiscovery {
-	return &HypervisorDiscovery{provider: provider, authOpts: opts,
-		region: region, port: port, availability: availability, logger: l}
+	return &HypervisorDiscovery{
+		provider: provider, authOpts: opts,
+		region: region, port: port, availability: availability, logger: l,
+	}
 }
 
 func (h *HypervisorDiscovery) refresh(ctx context.Context) ([]*targetgroup.Group, error) {
diff -pruN 2.31.2+ds1-1/discovery/openstack/hypervisor_test.go 2.33.5+ds1-2/discovery/openstack/hypervisor_test.go
--- 2.31.2+ds1-1/discovery/openstack/hypervisor_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/openstack/hypervisor_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -47,7 +47,6 @@ func (s *OpenstackSDHypervisorTestSuite)
 }
 
 func TestOpenstackSDHypervisorRefresh(t *testing.T) {
-
 	mock := &OpenstackSDHypervisorTestSuite{}
 	mock.SetupTest(t)
 
diff -pruN 2.31.2+ds1-1/discovery/openstack/instance.go 2.33.5+ds1-2/discovery/openstack/instance.go
--- 2.31.2+ds1-1/discovery/openstack/instance.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/openstack/instance.go	2022-03-08 16:34:32.000000000 +0000
@@ -63,8 +63,10 @@ func newInstanceDiscovery(provider *goph
 	if l == nil {
 		l = log.NewNopLogger()
 	}
-	return &InstanceDiscovery{provider: provider, authOpts: opts,
-		region: region, port: port, allTenants: allTenants, availability: availability, logger: l}
+	return &InstanceDiscovery{
+		provider: provider, authOpts: opts,
+		region: region, port: port, allTenants: allTenants, availability: availability, logger: l,
+	}
 }
 
 type floatingIPKey struct {
diff -pruN 2.31.2+ds1-1/discovery/openstack/instance_test.go 2.33.5+ds1-2/discovery/openstack/instance_test.go
--- 2.31.2+ds1-1/discovery/openstack/instance_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/openstack/instance_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -51,7 +51,6 @@ func (s *OpenstackSDInstanceTestSuite) o
 }
 
 func TestOpenstackSDInstanceRefresh(t *testing.T) {
-
 	mock := &OpenstackSDInstanceTestSuite{}
 	mock.SetupTest(t)
 
diff -pruN 2.31.2+ds1-1/discovery/openstack/mock_test.go 2.33.5+ds1-2/discovery/openstack/mock_test.go
--- 2.31.2+ds1-1/discovery/openstack/mock_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/openstack/mock_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -54,7 +54,7 @@ func testMethod(t *testing.T, r *http.Re
 	}
 }
 
-func testHeader(t *testing.T, r *http.Request, header string, expected string) {
+func testHeader(t *testing.T, r *http.Request, header, expected string) {
 	if actual := r.Header.Get(header); expected != actual {
 		t.Errorf("Header %s = %s, expected %s", header, actual, expected)
 	}
diff -pruN 2.31.2+ds1-1/discovery/openstack/openstack.go 2.33.5+ds1-2/discovery/openstack/openstack.go
--- 2.31.2+ds1-1/discovery/openstack/openstack.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/openstack/openstack.go	2022-03-08 16:34:32.000000000 +0000
@@ -145,7 +145,6 @@ func NewDiscovery(conf *SDConfig, l log.
 		time.Duration(conf.RefreshInterval),
 		r.refresh,
 	), nil
-
 }
 
 func newRefresher(conf *SDConfig, l log.Logger) (refresher, error) {
diff -pruN 2.31.2+ds1-1/discovery/puppetdb/puppetdb_test.go 2.33.5+ds1-2/discovery/puppetdb/puppetdb_test.go
--- 2.31.2+ds1-1/discovery/puppetdb/puppetdb_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/puppetdb/puppetdb_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -25,8 +25,9 @@ import (
 	"github.com/go-kit/log"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
 func mockServer(t *testing.T) *httptest.Server {
diff -pruN 2.31.2+ds1-1/discovery/puppetdb/resources.go 2.33.5+ds1-2/discovery/puppetdb/resources.go
--- 2.31.2+ds1-1/discovery/puppetdb/resources.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/puppetdb/resources.go	2022-03-08 16:34:32.000000000 +0000
@@ -18,6 +18,7 @@ import (
 	"strings"
 
 	"github.com/prometheus/common/model"
+
 	"github.com/prometheus/prometheus/util/strutil"
 )
 
diff -pruN 2.31.2+ds1-1/discovery/README.md 2.33.5+ds1-2/discovery/README.md
--- 2.31.2+ds1-1/discovery/README.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/README.md	2022-03-08 16:34:32.000000000 +0000
@@ -131,7 +131,7 @@ the Prometheus server will be able to se
 
 ### The SD interface
 
-A Service Discovery (SD) mechanism has to discover targets and provide them to Prometheus. We expect similar targets to be grouped together, in the form of a [target group](https://pkg.go.dev/github.com/prometheus/prometheus/discovery/targetgroup#Group). The SD mechanism sends the targets down to prometheus as list of target groups.
+A Service Discovery (SD) mechanism has to discover targets and provide them to Prometheus. We expect similar targets to be grouped together, in the form of a [target group](https://pkg.go.dev/github.com/prometheus/prometheus@v1.8.2-0.20211105201321-411021ada9ab/discovery/targetgroup#Group). The SD mechanism sends the targets down to prometheus as list of target groups.
 
 An SD mechanism has to implement the `Discoverer` Interface:
 ```go
diff -pruN 2.31.2+ds1-1/discovery/scaleway/baremetal.go 2.33.5+ds1-2/discovery/scaleway/baremetal.go
--- 2.31.2+ds1-1/discovery/scaleway/baremetal.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/scaleway/baremetal.go	2022-03-08 16:34:32.000000000 +0000
@@ -25,10 +25,11 @@ import (
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
-	"github.com/prometheus/prometheus/discovery/refresh"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/scaleway/scaleway-sdk-go/api/baremetal/v1"
 	"github.com/scaleway/scaleway-sdk-go/scw"
+
+	"github.com/prometheus/prometheus/discovery/refresh"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
 type baremetalDiscovery struct {
diff -pruN 2.31.2+ds1-1/discovery/scaleway/instance.go 2.33.5+ds1-2/discovery/scaleway/instance.go
--- 2.31.2+ds1-1/discovery/scaleway/instance.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/scaleway/instance.go	2022-03-08 16:34:32.000000000 +0000
@@ -25,10 +25,11 @@ import (
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
 	"github.com/prometheus/common/version"
-	"github.com/prometheus/prometheus/discovery/refresh"
-	"github.com/prometheus/prometheus/discovery/targetgroup"
 	"github.com/scaleway/scaleway-sdk-go/api/instance/v1"
 	"github.com/scaleway/scaleway-sdk-go/scw"
+
+	"github.com/prometheus/prometheus/discovery/refresh"
+	"github.com/prometheus/prometheus/discovery/targetgroup"
 )
 
 const (
diff -pruN 2.31.2+ds1-1/discovery/scaleway/scaleway.go 2.33.5+ds1-2/discovery/scaleway/scaleway.go
--- 2.31.2+ds1-1/discovery/scaleway/scaleway.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/scaleway/scaleway.go	2022-03-08 16:34:32.000000000 +0000
@@ -24,10 +24,11 @@ import (
 	"github.com/pkg/errors"
 	"github.com/prometheus/common/config"
 	"github.com/prometheus/common/model"
+	"github.com/scaleway/scaleway-sdk-go/scw"
+
 	"github.com/prometheus/prometheus/discovery"
 	"github.com/prometheus/prometheus/discovery/refresh"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
-	"github.com/scaleway/scaleway-sdk-go/scw"
 )
 
 // metaLabelPrefix is the meta prefix used for all meta labels.
@@ -173,8 +174,7 @@ func init() {
 
 // Discovery periodically performs Scaleway requests. It implements
 // the Discoverer interface.
-type Discovery struct {
-}
+type Discovery struct{}
 
 func NewDiscovery(conf *SDConfig, logger log.Logger) (*refresh.Discovery, error) {
 	r, err := newRefresher(conf)
diff -pruN 2.31.2+ds1-1/discovery/targetgroup/targetgroup_test.go 2.33.5+ds1-2/discovery/targetgroup/targetgroup_test.go
--- 2.31.2+ds1-1/discovery/targetgroup/targetgroup_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/targetgroup/targetgroup_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -38,7 +38,8 @@ func TestTargetGroupStrictJsonUnmarshal(
 			expectedReply: nil,
 			expectedGroup: Group{Targets: []model.LabelSet{
 				{"__address__": "localhost:9090"},
-				{"__address__": "localhost:9091"}}, Labels: model.LabelSet{"my": "label"}},
+				{"__address__": "localhost:9091"},
+			}, Labels: model.LabelSet{"my": "label"}},
 		},
 		{
 			json: `	{"label": {},"targets": []}`,
@@ -56,7 +57,6 @@ func TestTargetGroupStrictJsonUnmarshal(
 		require.Equal(t, test.expectedReply, actual)
 		require.Equal(t, test.expectedGroup, tg)
 	}
-
 }
 
 func TestTargetGroupYamlMarshal(t *testing.T) {
@@ -81,10 +81,13 @@ func TestTargetGroupYamlMarshal(t *testi
 		},
 		{
 			// targets only exposes addresses.
-			group: Group{Targets: []model.LabelSet{
-				{"__address__": "localhost:9090"},
-				{"__address__": "localhost:9091"}},
-				Labels: model.LabelSet{"foo": "bar", "bar": "baz"}},
+			group: Group{
+				Targets: []model.LabelSet{
+					{"__address__": "localhost:9090"},
+					{"__address__": "localhost:9091"},
+				},
+				Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
+			},
 			expectedYaml: "targets:\n- localhost:9090\n- localhost:9091\nlabels:\n  bar: baz\n  foo: bar\n",
 			expectedErr:  nil,
 		},
@@ -120,7 +123,8 @@ func TestTargetGroupYamlUnmarshal(t *tes
 			expectedReply: nil,
 			expectedGroup: Group{Targets: []model.LabelSet{
 				{"__address__": "localhost:9090"},
-				{"__address__": "localhost:9191"}}, Labels: model.LabelSet{"my": "label"}},
+				{"__address__": "localhost:9191"},
+			}, Labels: model.LabelSet{"my": "label"}},
 		},
 		{
 			// incorrect syntax.
@@ -135,21 +139,25 @@ func TestTargetGroupYamlUnmarshal(t *tes
 		require.Equal(t, test.expectedReply, actual)
 		require.Equal(t, test.expectedGroup, tg)
 	}
-
 }
 
 func TestString(t *testing.T) {
 	// String() should return only the source, regardless of other attributes.
 	group1 :=
-		Group{Targets: []model.LabelSet{
-			{"__address__": "localhost:9090"},
-			{"__address__": "localhost:9091"}},
+		Group{
+			Targets: []model.LabelSet{
+				{"__address__": "localhost:9090"},
+				{"__address__": "localhost:9091"},
+			},
 			Source: "<source>",
-			Labels: model.LabelSet{"foo": "bar", "bar": "baz"}}
+			Labels: model.LabelSet{"foo": "bar", "bar": "baz"},
+		}
 	group2 :=
-		Group{Targets: []model.LabelSet{},
-			Source: "<source>",
-			Labels: model.LabelSet{}}
+		Group{
+			Targets: []model.LabelSet{},
+			Source:  "<source>",
+			Labels:  model.LabelSet{},
+		}
 	require.Equal(t, "<source>", group1.String())
 	require.Equal(t, "<source>", group2.String())
 	require.Equal(t, group1.String(), group2.String())
diff -pruN 2.31.2+ds1-1/discovery/triton/triton.go 2.33.5+ds1-2/discovery/triton/triton.go
--- 2.31.2+ds1-1/discovery/triton/triton.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/triton/triton.go	2022-03-08 16:34:32.000000000 +0000
@@ -188,9 +188,9 @@ func (d *Discovery) refresh(ctx context.
 	case "cn":
 		endpointFormat = "https://%s:%d/v%d/gz/discover"
 	default:
-		return nil, errors.New(fmt.Sprintf("unknown role '%s' in configuration", d.sdConfig.Role))
+		return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role)
 	}
-	var endpoint = fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
+	endpoint := fmt.Sprintf(endpointFormat, d.sdConfig.Endpoint, d.sdConfig.Port, d.sdConfig.Version)
 	if len(d.sdConfig.Groups) > 0 {
 		groups := url.QueryEscape(strings.Join(d.sdConfig.Groups, ","))
 		endpoint = fmt.Sprintf("%s?groups=%s", endpoint, groups)
@@ -223,7 +223,7 @@ func (d *Discovery) refresh(ctx context.
 	case "cn":
 		return d.processComputeNodeResponse(data, endpoint)
 	default:
-		return nil, errors.New(fmt.Sprintf("unknown role '%s' in configuration", d.sdConfig.Role))
+		return nil, fmt.Errorf("unknown role '%s' in configuration", d.sdConfig.Role)
 	}
 }
 
diff -pruN 2.31.2+ds1-1/discovery/triton/triton_test.go 2.33.5+ds1-2/discovery/triton/triton_test.go
--- 2.31.2+ds1-1/discovery/triton/triton_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/triton/triton_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -135,8 +135,7 @@ func TestTritonSDRefreshNoTargets(t *tes
 }
 
 func TestTritonSDRefreshMultipleTargets(t *testing.T) {
-	var (
-		dstr = `{"containers":[
+	dstr := `{"containers":[
 		 	{
                                 "groups":["foo","bar","baz"],
 				"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
@@ -153,7 +152,6 @@ func TestTritonSDRefreshMultipleTargets(
 				"vm_uuid":"7b27a514-89d7-11e6-bee6-3f96f367bee7"
 			}]
 		}`
-	)
 
 	tgts := testTritonSDRefresh(t, conf, dstr)
 	require.NotNil(t, tgts)
@@ -161,9 +159,7 @@ func TestTritonSDRefreshMultipleTargets(
 }
 
 func TestTritonSDRefreshNoServer(t *testing.T) {
-	var (
-		td, _ = newTritonDiscovery(conf)
-	)
+	td, _ := newTritonDiscovery(conf)
 
 	_, err := td.refresh(context.Background())
 	require.Error(t, err)
@@ -171,9 +167,7 @@ func TestTritonSDRefreshNoServer(t *test
 }
 
 func TestTritonSDRefreshCancelled(t *testing.T) {
-	var (
-		td, _ = newTritonDiscovery(conf)
-	)
+	td, _ := newTritonDiscovery(conf)
 
 	ctx, cancel := context.WithCancel(context.Background())
 	cancel()
@@ -183,8 +177,7 @@ func TestTritonSDRefreshCancelled(t *tes
 }
 
 func TestTritonSDRefreshCNsUUIDOnly(t *testing.T) {
-	var (
-		dstr = `{"cns":[
+	dstr := `{"cns":[
 		 	{
 				"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131"
 			},
@@ -192,7 +185,6 @@ func TestTritonSDRefreshCNsUUIDOnly(t *t
 				"server_uuid":"a5894692-bd32-4ca1-908a-e2dda3c3a5e6"
 			}]
 		}`
-	)
 
 	tgts := testTritonSDRefresh(t, cnconf, dstr)
 	require.NotNil(t, tgts)
@@ -200,8 +192,7 @@ func TestTritonSDRefreshCNsUUIDOnly(t *t
 }
 
 func TestTritonSDRefreshCNsWithHostname(t *testing.T) {
-	var (
-		dstr = `{"cns":[
+	dstr := `{"cns":[
 		 	{
 				"server_uuid":"44454c4c-5000-104d-8037-b7c04f5a5131",
 				"server_hostname": "server01"
@@ -211,7 +202,6 @@ func TestTritonSDRefreshCNsWithHostname(
 				"server_hostname": "server02"
 			}]
 		}`
-	)
 
 	tgts := testTritonSDRefresh(t, cnconf, dstr)
 	require.NotNil(t, tgts)
diff -pruN 2.31.2+ds1-1/discovery/uyuni/uyuni.go 2.33.5+ds1-2/discovery/uyuni/uyuni.go
--- 2.31.2+ds1-1/discovery/uyuni/uyuni.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/uyuni/uyuni.go	2022-03-08 16:34:32.000000000 +0000
@@ -62,7 +62,7 @@ func init() {
 
 // SDConfig is the configuration for Uyuni based service discovery.
 type SDConfig struct {
-	Server           config.URL              `yaml:"server"`
+	Server           string                  `yaml:"server"`
 	Username         string                  `yaml:"username"`
 	Password         config.Secret           `yaml:"password"`
 	HTTPClientConfig config.HTTPClientConfig `yaml:",inline"`
@@ -119,15 +119,14 @@ func (c *SDConfig) UnmarshalYAML(unmarsh
 	*c = DefaultSDConfig
 	type plain SDConfig
 	err := unmarshal((*plain)(c))
-
 	if err != nil {
 		return err
 	}
-	if c.Server.URL == nil {
+	if c.Server == "" {
 		return errors.New("Uyuni SD configuration requires server host")
 	}
 
-	_, err = url.Parse(c.Server.String())
+	_, err = url.Parse(c.Server)
 	if err != nil {
 		return errors.Wrap(err, "Uyuni Server URL is not valid")
 	}
@@ -141,7 +140,7 @@ func (c *SDConfig) UnmarshalYAML(unmarsh
 	return nil
 }
 
-func login(rpcclient *xmlrpc.Client, user string, pass string) (string, error) {
+func login(rpcclient *xmlrpc.Client, user, pass string) (string, error) {
 	var result string
 	err := rpcclient.Call("auth.login", []interface{}{user, pass}, &result)
 	return result, err
@@ -151,7 +150,7 @@ func logout(rpcclient *xmlrpc.Client, to
 	return rpcclient.Call("auth.logout", token, nil)
 }
 
-func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token string, entitlement string) (map[int][]systemGroupID, error) {
+func getSystemGroupsInfoOfMonitoredClients(rpcclient *xmlrpc.Client, token, entitlement string) (map[int][]systemGroupID, error) {
 	var systemGroupsInfos []struct {
 		SystemID     int             `xmlrpc:"id"`
 		SystemGroups []systemGroupID `xmlrpc:"system_groups"`
@@ -200,8 +199,10 @@ func getEndpointInfoForSystems(
 
 // NewDiscovery returns a uyuni discovery for the given configuration.
 func NewDiscovery(conf *SDConfig, logger log.Logger) (*Discovery, error) {
-	var apiURL *url.URL
-	*apiURL = *conf.Server.URL
+	apiURL, err := url.Parse(conf.Server)
+	if err != nil {
+		return nil, err
+	}
 	apiURL.Path = path.Join(apiURL.Path, uyuniXMLRPCAPIPath)
 
 	rt, err := config.NewRoundTripperFromConfig(conf.HTTPClientConfig, "uyuni_sd")
@@ -234,7 +235,6 @@ func (d *Discovery) getEndpointLabels(
 	systemGroupIDs []systemGroupID,
 	networkInfo networkInfo,
 ) model.LabelSet {
-
 	var addr, scheme string
 	managedGroupNames := getSystemGroupNames(systemGroupIDs)
 	addr = fmt.Sprintf("%s:%d", networkInfo.Hostname, endpoint.Port)
@@ -274,7 +274,6 @@ func (d *Discovery) getTargetsForSystems
 	token string,
 	entitlement string,
 ) ([]model.LabelSet, error) {
-
 	result := make([]model.LabelSet, 0)
 
 	systemGroupIDsBySystemID, err := getSystemGroupsInfoOfMonitoredClients(rpcClient, token, entitlement)
diff -pruN 2.31.2+ds1-1/discovery/uyuni/uyuni_test.go 2.33.5+ds1-2/discovery/uyuni/uyuni_test.go
--- 2.31.2+ds1-1/discovery/uyuni/uyuni_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/discovery/uyuni/uyuni_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,58 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package uyuni
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptest"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/discovery/targetgroup"
+)
+
+func testUpdateServices(respHandler http.HandlerFunc) ([]*targetgroup.Group, error) {
+	// Create a test server with mock HTTP handler.
+	ts := httptest.NewServer(respHandler)
+	defer ts.Close()
+
+	conf := SDConfig{
+		Server: ts.URL,
+	}
+
+	md, err := NewDiscovery(&conf, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	return md.refresh(context.Background())
+}
+
+func TestUyuniSDHandleError(t *testing.T) {
+	var (
+		errTesting  = "unable to login to Uyuni API: request error: bad status code - 500"
+		respHandler = func(w http.ResponseWriter, r *http.Request) {
+			w.WriteHeader(http.StatusInternalServerError)
+			w.Header().Set("Content-Type", "application/xml")
+			io.WriteString(w, ``)
+		}
+	)
+	tgs, err := testUpdateServices(respHandler)
+
+	require.EqualError(t, err, errTesting)
+	require.Equal(t, len(tgs), 0)
+}
diff -pruN 2.31.2+ds1-1/discovery/xds/client_test.go 2.33.5+ds1-2/discovery/xds/client_test.go
--- 2.31.2+ds1-1/discovery/xds/client_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/xds/client_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -26,8 +26,8 @@ import (
 	"google.golang.org/protobuf/types/known/anypb"
 )
 
-var (
-	httpResourceConf = &HTTPResourceClientConfig{
+func testHTTPResourceConfig() *HTTPResourceClientConfig {
+	return &HTTPResourceClientConfig{
 		HTTPClientConfig: config.HTTPClientConfig{
 			TLSConfig: config.TLSConfig{InsecureSkipVerify: true},
 		},
@@ -37,11 +37,10 @@ var (
 		Server:          "http://localhost",
 		ClientID:        "test-id",
 	}
-)
+}
 
 func urlMustParse(str string) *url.URL {
 	parsed, err := url.Parse(str)
-
 	if err != nil {
 		panic(err)
 	}
@@ -92,7 +91,6 @@ func TestCreateNewHTTPResourceClient(t *
 
 	require.Equal(t, client.endpoint, "http://127.0.0.1:5000/v3/discovery:monitoring?param1=v1")
 	require.Equal(t, client.client.Timeout, 1*time.Minute)
-
 }
 
 func createTestHTTPResourceClient(t *testing.T, conf *HTTPResourceClientConfig, protocolVersion ProtocolVersion, responder discoveryResponder) (*HTTPResourceClient, func()) {
@@ -110,7 +108,7 @@ func createTestHTTPResourceClient(t *tes
 }
 
 func TestHTTPResourceClientFetchEmptyResponse(t *testing.T) {
-	client, cleanup := createTestHTTPResourceClient(t, httpResourceConf, ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
+	client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
 		return nil, nil
 	})
 	defer cleanup()
@@ -121,7 +119,7 @@ func TestHTTPResourceClientFetchEmptyRes
 }
 
 func TestHTTPResourceClientFetchFullResponse(t *testing.T) {
-	client, cleanup := createTestHTTPResourceClient(t, httpResourceConf, ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
+	client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
 		if request.VersionInfo == "1" {
 			return nil, nil
 		}
@@ -150,7 +148,7 @@ func TestHTTPResourceClientFetchFullResp
 }
 
 func TestHTTPResourceClientServerError(t *testing.T) {
-	client, cleanup := createTestHTTPResourceClient(t, httpResourceConf, ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
+	client, cleanup := createTestHTTPResourceClient(t, testHTTPResourceConfig(), ProtocolV3, func(request *v3.DiscoveryRequest) (*v3.DiscoveryResponse, error) {
 		return nil, errors.New("server error")
 	})
 	defer cleanup()
diff -pruN 2.31.2+ds1-1/discovery/xds/kuma.go 2.33.5+ds1-2/discovery/xds/kuma.go
--- 2.31.2+ds1-1/discovery/xds/kuma.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/xds/kuma.go	2022-03-08 16:34:32.000000000 +0000
@@ -103,11 +103,7 @@ func (c *KumaSDConfig) UnmarshalYAML(unm
 		return errors.Errorf("kuma SD server must not be empty and have a scheme: %s", c.Server)
 	}
 
-	if err := c.HTTPClientConfig.Validate(); err != nil {
-		return err
-	}
-
-	return nil
+	return c.HTTPClientConfig.Validate()
 }
 
 func (c *KumaSDConfig) Name() string {
diff -pruN 2.31.2+ds1-1/discovery/xds/kuma_test.go 2.33.5+ds1-2/discovery/xds/kuma_test.go
--- 2.31.2+ds1-1/discovery/xds/kuma_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/discovery/xds/kuma_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -91,7 +91,6 @@ func getKumaMadsV1DiscoveryResponse(reso
 	serialized := make([]*anypb.Any, len(resources))
 	for i, res := range resources {
 		data, err := proto.Marshal(res)
-
 		if err != nil {
 			return nil, err
 		}
diff -pruN 2.31.2+ds1-1/docs/configuration/configuration.md 2.33.5+ds1-2/docs/configuration/configuration.md
--- 2.31.2+ds1-1/docs/configuration/configuration.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/docs/configuration/configuration.md	2022-03-08 16:34:32.000000000 +0000
@@ -95,6 +95,10 @@ remote_write:
 # Settings related to the remote read feature.
 remote_read:
   [ - <remote_read> ... ]
+
+# Storage related settings that are runtime reloadable.
+storage:
+  [ - <exemplars> ... ]  
 ```
 
 ### `<scrape_config>`
@@ -1023,8 +1027,8 @@ address defaults to the `host_ip` attrib
 The following meta labels are available on targets during [relabeling](#relabel_config):
 
 * `__meta_openstack_hypervisor_host_ip`: the hypervisor node's IP address.
+* `__meta_openstack_hypervisor_hostname`: the hypervisor node's name.
 * `__meta_openstack_hypervisor_id`: the hypervisor node's ID.
-* `__meta_openstack_hypervisor_name`: the hypervisor node's name.
 * `__meta_openstack_hypervisor_state`: the hypervisor node's state.
 * `__meta_openstack_hypervisor_status`: the hypervisor node's status.
 * `__meta_openstack_hypervisor_type`: the hypervisor node's type.
@@ -1509,6 +1513,7 @@ node object in the address type order of
 Available meta labels:
 
 * `__meta_kubernetes_node_name`: The name of the node object.
+* `__meta_kubernetes_node_provider_id`: The cloud provider's name for the node object.
 * `__meta_kubernetes_node_label_<labelname>`: Each label from the node object.
 * `__meta_kubernetes_node_labelpresent_<labelname>`: `true` for each label from the node object.
 * `__meta_kubernetes_node_annotation_<annotationname>`: Each annotation from the node object.
@@ -1597,19 +1602,20 @@ address referenced in the endpointslice
 additional container ports of the pod, not bound to an endpoint port, are discovered as targets as well.
 
 Available meta labels:
+
 * `__meta_kubernetes_namespace`: The namespace of the endpoints object.
 * `__meta_kubernetes_endpointslice_name`: The name of endpointslice object.
 * For all targets discovered directly from the endpointslice list (those not additionally inferred
   from underlying pods), the following labels are attached:
-* `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object.
-* `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object.
-* `__meta_kubernetes_endpointslice_address_type`: The ip protocol family of the adress target.
-* `__meta_kubernetes_endpointslice_endpoint_conditions_ready`:  Set to `true` or `false` for the referenced endpoint's ready state.
-* `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`:  Name of the node hosting the referenced endpoint.
-* `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation.
-* `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
-* `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
-* `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_address_target_kind`: Kind of the referenced object.
+  * `__meta_kubernetes_endpointslice_address_target_name`: Name of referenced object.
+  * `__meta_kubernetes_endpointslice_address_type`: The IP protocol family of the address of the target.
+  * `__meta_kubernetes_endpointslice_endpoint_conditions_ready`:  Set to `true` or `false` for the referenced endpoint's ready state.
+  * `__meta_kubernetes_endpointslice_endpoint_topology_kubernetes_io_hostname`:  Name of the node hosting the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_endpoint_topology_present_kubernetes_io_hostname`: Flag that shows if the referenced object has a kubernetes.io/hostname annotation.
+  * `__meta_kubernetes_endpointslice_port`: Port of the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_port_name`: Named port of the referenced endpoint.
+  * `__meta_kubernetes_endpointslice_port_protocol`: Protocol of the referenced endpoint.
 * If the endpoints belong to a service, all labels of the `role: service` discovery are attached.
 * For all targets backed by a pod, all labels of the `role: pod` discovery are attached.
 
@@ -1688,6 +1694,7 @@ tls_config:
 
 # Optional namespace discovery. If omitted, all namespaces are used.
 namespaces:
+  own_namespace: <bool>
   names:
     [ - <string> ]
 
@@ -2752,7 +2759,7 @@ queue_config:
   # Initial retry delay. Gets doubled for every retry.
   [ min_backoff: <duration> | default = 30ms ]
   # Maximum retry delay.
-  [ max_backoff: <duration> | default = 100ms ]
+  [ max_backoff: <duration> | default = 5s ]
   # Retry upon receiving a 429 status code from the remote-write storage.
   # This is experimental and might change in the future.
   [ retry_on_http_429: <boolean> | default = false ]
@@ -2839,3 +2846,12 @@ tls_config:
 There is a list of
 [integrations](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage)
 with this feature.
+
+### `<exemplars>`
+
+Note that exemplar storage is still considered experimental and must be enabled via `--enable-feature=exemplar-storage`.
+
+```yaml
+# Configures the maximum size of the circular buffer used to store exemplars for all series. Resizable during runtime.
+[ max_exemplars: <int> | default = 100000 ]
+```
diff -pruN 2.31.2+ds1-1/docs/configuration/template_reference.md 2.33.5+ds1-2/docs/configuration/template_reference.md
--- 2.31.2+ds1-1/docs/configuration/template_reference.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/docs/configuration/template_reference.md	2022-03-08 16:34:32.000000000 +0000
@@ -70,10 +70,12 @@ versions.
 | title         | string        | string  | [strings.Title](https://golang.org/pkg/strings/#Title), capitalises first character of each word.|
 | toUpper       | string        | string  | [strings.ToUpper](https://golang.org/pkg/strings/#ToUpper), converts all characters to upper case.|
 | toLower       | string        | string  | [strings.ToLower](https://golang.org/pkg/strings/#ToLower), converts all characters to lower case.|
+| stripPort     | string        | string  | [net.SplitHostPort](https://pkg.go.dev/net#SplitHostPort), splits string into host and port, then returns only host.|
 | match         | pattern, text | boolean | [regexp.MatchString](https://golang.org/pkg/regexp/#MatchString) Tests for a unanchored regexp match. |
 | reReplaceAll  | pattern, replacement, text | string | [Regexp.ReplaceAllString](https://golang.org/pkg/regexp/#Regexp.ReplaceAllString) Regexp substitution, unanchored. |
 | graphLink  | expr | string | Returns path to graph view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. |
 | tableLink  | expr | string | Returns path to tabular ("Table") view in the [expression browser](https://prometheus.io/docs/visualization/browser/) for the expression. |
+| parseDuration | string | float | Parses a duration string such as "1h" into the number of seconds it represents. |
 
 ### Others
 
diff -pruN 2.31.2+ds1-1/docs/feature_flags.md 2.33.5+ds1-2/docs/feature_flags.md
--- 2.31.2+ds1-1/docs/feature_flags.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/docs/feature_flags.md	2022-03-08 16:34:32.000000000 +0000
@@ -11,13 +11,6 @@ Their behaviour can change in future rel
 You can enable them using the `--enable-feature` flag with a comma separated list of features.
 They may be enabled by default in future versions.
 
-## `@` Modifier in PromQL
-
-`--enable-feature=promql-at-modifier`
-
-The `@` modifier lets you specify the evaluation time for instant vector selectors,
-range vector selectors, and subqueries. More details can be found [here](querying/basics.md#modifier).
-
 ## Expand environment variables in external labels
 
 `--enable-feature=expand-external-labels`
@@ -26,33 +19,21 @@ Replace `${var}` or `$var` in the [`exte
 values according to the values of the current environment variables. References
 to undefined variables are replaced by the empty string.
 
-## Negative offset in PromQL
-
-This negative offset is disabled by default since it breaks the invariant
-that PromQL does not look ahead of the evaluation time for samples.
-
-`--enable-feature=promql-negative-offset`
-
-In contrast to the positive offset modifier, the negative offset modifier lets
-one shift a vector selector into the future. An example in which one may want
-to use a negative offset is reviewing past data and making temporal comparisons
-with more recent data.
-
-More details can be found [here](querying/basics.md#offset-modifier).
-
 ## Remote Write Receiver
 
 `--enable-feature=remote-write-receiver`
 
 The remote write receiver allows Prometheus to accept remote write requests from other Prometheus servers. More details can be found [here](storage.md#overview).
 
+Activating the remote write receiver via a feature flag is deprecated. Use `--web.enable-remote-write-receiver` instead. This feature flag will be ignored in future versions of Prometheus.
+
 ## Exemplars storage
 
 `--enable-feature=exemplar-storage`
 
 [OpenMetrics](https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars) introduces the ability for scrape targets to add exemplars to certain metrics. Exemplars are references to data outside of the MetricSet. A common use case are IDs of program traces.
 
-Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The flag `storage.exemplars.exemplars-limit` can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=<jaeger-trace-id>` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration).
+Exemplar storage is implemented as a fixed size circular buffer that stores exemplars in memory for all series. Enabling this feature will enable the storage of exemplars scraped by Prometheus. The config file block [storage](configuration/configuration.md#configuration-file)/[exemplars](configuration/configuration.md#exemplars) can be used to control the size of circular buffer by # of exemplars. An exemplar with just a `traceID=<jaeger-trace-id>` uses roughly 100 bytes of memory via the in-memory exemplar storage. If the exemplar storage is enabled, we will also append the exemplars to WAL for local persistence (for WAL duration).
 
 ## Memory snapshot on shutdown
 
@@ -71,6 +52,7 @@ When enabled, for each instance scrape,
 - `scrape_timeout_seconds`. The configured `scrape_timeout` for a target. This allows you to measure each target to find out how close they are to timing out with `scrape_duration_seconds / scrape_timeout_seconds`.
 - `scrape_sample_limit`. The configured `sample_limit` for a target. This allows you to measure each target
   to find out how close they are to reaching the limit with `scrape_samples_post_metric_relabeling / scrape_sample_limit`. Note that `scrape_sample_limit` can be zero if there is no limit configured, which means that the query above can return `+Inf` for targets with no limit (as we divide by zero). If you want to query only for targets that do have a sample limit use this query: `scrape_samples_post_metric_relabeling / (scrape_sample_limit > 0)`.
+- `scrape_body_size_bytes`. The uncompressed size of the most recent scrape response, if successful. Scrapes failing because `body_size_limit` is exceeded report `-1`, other scrape failures report `0`.
 
 ## New service discovery manager
 
@@ -85,3 +67,13 @@ issues upstream.
 
 In future releases, this new service discovery manager will become the default and
 this feature flag will be ignored.
+
+## Prometheus agent
+
+`--enable-feature=agent`
+
+When enabled, Prometheus runs in agent mode. The agent mode is limited to
+discovery, scrape and remote write.
+
+This is useful when you do not need to query the Prometheus data locally, but
+only from a central [remote endpoint](https://prometheus.io/docs/operating/integrations/#remote-endpoints-and-storage).
diff -pruN 2.31.2+ds1-1/docs/querying/api.md 2.33.5+ds1-2/docs/querying/api.md
--- 2.31.2+ds1-1/docs/querying/api.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/docs/querying/api.md	2022-03-08 16:34:32.000000000 +0000
@@ -620,6 +620,7 @@ $ curl http://localhost:9090/api/v1/rule
                 ],
                 "file": "/rules.yaml",
                 "interval": 60,
+                "limit": 0,
                 "name": "example"
             }
         ]
@@ -1145,3 +1146,17 @@ $ curl -XPOST http://localhost:9090/api/
 ```
 
 *New in v2.1 and supports PUT from v2.9*
+
+## Remote Write Receiver
+
+Prometheus can be configured as a receiver for the Prometheus remote write
+protocol. This is not considered an efficient way of ingesting samples. Use it
+with caution for specific low-volume use cases. It is not suitable for
+replacing the ingestion via scraping and turning Prometheus into a push-based
+metrics collection system.
+
+Enable the remote write receiver by setting
+`--web.enable-remote-write-receiver`. When enabled, the remote write receiver
+endpoint is `/api/v1/write`. Find more details [here](../storage.md#overview).
+
+*New in v2.33*
diff -pruN 2.31.2+ds1-1/docs/querying/basics.md 2.33.5+ds1-2/docs/querying/basics.md
--- 2.31.2+ds1-1/docs/querying/basics.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/docs/querying/basics.md	2022-03-08 16:34:32.000000000 +0000
@@ -209,9 +209,7 @@ can be specified:
 
     rate(http_requests_total[5m] offset -1w)
 
-This feature is enabled by setting `--enable-feature=promql-negative-offset`
-flag. See [feature flags](../feature_flags.md) for more details about
-this flag.
+Note that this allows a query to look ahead of its evaluation time.
 
 ### @ modifier
 
@@ -249,10 +247,6 @@ These 2 queries will produce the same re
     # offset before @
     http_requests_total offset 5m @ 1609746000
 
-This modifier is disabled by default since it breaks the invariant that PromQL
-does not look ahead of the evaluation time for samples. It can be enabled by setting
-`--enable-feature=promql-at-modifier` flag. See [feature flags](../feature_flags.md) for more details about this flag.
-
 Additionally, `start()` and `end()` can also be used as values for the `@` modifier as special values.
 
 For a range query, they resolve to the start and end of the range query respectively and remain the same for all steps.
@@ -262,6 +256,8 @@ For an instant query, `start()` and `end
     http_requests_total @ start()
     rate(http_requests_total[5m] @ end())
 
+Note that the `@` modifier allows a query to look ahead of its evaluation time.
+
 ## Subquery
 
 Subquery allows you to run an instant query for a given range and resolution. The result of a subquery is a range vector.
diff -pruN 2.31.2+ds1-1/docs/storage.md 2.33.5+ds1-2/docs/storage.md
--- 2.31.2+ds1-1/docs/storage.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/docs/storage.md	2022-03-08 16:34:32.000000000 +0000
@@ -129,7 +129,7 @@ The read and write protocols both use a
 
 For details on configuring remote storage integrations in Prometheus, see the [remote write](configuration/configuration.md#remote_write) and [remote read](configuration/configuration.md#remote_read) sections of the Prometheus configuration documentation.
 
-The built-in remote write receiver can be enabled by setting the `--enable-feature=remote-write-receiver` command line flag. When enabled, the remote write receiver endpoint is `/api/v1/write`.
+The built-in remote write receiver can be enabled by setting the `--web.enable-remote-write-receiver` command line flag. When enabled, the remote write receiver endpoint is `/api/v1/write`.
 
 For details on the request and response messages, see the [remote storage protocol buffer definitions](https://github.com/prometheus/prometheus/blob/main/prompb/remote.proto).
 
diff -pruN 2.31.2+ds1-1/documentation/examples/custom-sd/adapter/adapter.go 2.33.5+ds1-2/documentation/examples/custom-sd/adapter/adapter.go
--- 2.31.2+ds1-1/documentation/examples/custom-sd/adapter/adapter.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/examples/custom-sd/adapter/adapter.go	2022-03-08 16:34:32.000000000 +0000
@@ -163,7 +163,7 @@ func (a *Adapter) Run() {
 }
 
 // NewAdapter creates a new instance of Adapter.
-func NewAdapter(ctx context.Context, file string, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
+func NewAdapter(ctx context.Context, file, name string, d discovery.Discoverer, logger log.Logger) *Adapter {
 	return &Adapter{
 		ctx:     ctx,
 		disc:    d,
diff -pruN 2.31.2+ds1-1/documentation/examples/custom-sd/adapter-usage/main.go 2.33.5+ds1-2/documentation/examples/custom-sd/adapter-usage/main.go
--- 2.31.2+ds1-1/documentation/examples/custom-sd/adapter-usage/main.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/examples/custom-sd/adapter-usage/main.go	2022-03-08 16:34:32.000000000 +0000
@@ -50,7 +50,7 @@ var (
 	tagsLabel = model.MetaLabelPrefix + "consul_tags"
 	// serviceAddressLabel is the name of the label containing the (optional) service address.
 	serviceAddressLabel = model.MetaLabelPrefix + "consul_service_address"
-	//servicePortLabel is the name of the label containing the service port.
+	// servicePortLabel is the name of the label containing the service port.
 	servicePortLabel = model.MetaLabelPrefix + "consul_service_port"
 	// serviceIDLabel is the name of the label containing the service ID.
 	serviceIDLabel = model.MetaLabelPrefix + "consul_service_id"
@@ -120,7 +120,7 @@ func (d *discovery) parseServiceNodes(re
 	for _, node := range nodes {
 		// We surround the separated list with the separator as well. This way regular expressions
 		// in relabeling rules don't have to consider tag positions.
-		var tags = "," + strings.Join(node.ServiceTags, ",") + ","
+		tags := "," + strings.Join(node.ServiceTags, ",") + ","
 
 		// If the service address is not empty it should be used instead of the node address
 		// since the service may be registered remotely through a different node.
@@ -162,7 +162,6 @@ func (d *discovery) Run(ctx context.Cont
 	for c := time.Tick(time.Duration(d.refreshInterval) * time.Second); ; {
 		var srvs map[string][]string
 		resp, err := http.Get(fmt.Sprintf("http://%s/v1/catalog/services", d.address))
-
 		if err != nil {
 			level.Error(d.logger).Log("msg", "Error getting services list", "err", err)
 			time.Sleep(time.Duration(d.refreshInterval) * time.Second)
diff -pruN 2.31.2+ds1-1/documentation/examples/prometheus-agent.yml 2.33.5+ds1-2/documentation/examples/prometheus-agent.yml
--- 2.31.2+ds1-1/documentation/examples/prometheus-agent.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/documentation/examples/prometheus-agent.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,22 @@
+# my global config
+global:
+  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
+  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
+  # scrape_timeout is set to the global default (10s).
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: "prometheus"
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+      - targets: ["localhost:9090"]
+
+# When running prometheus in Agent mode, remote-write is required.
+remote_write:
+  # Agent is able to run with a invalid remote-write URL, but, of course, will fail to push timeseries.
+  - url: "http://remote-write-url"
diff -pruN 2.31.2+ds1-1/documentation/examples/prometheus-kubernetes.yml 2.33.5+ds1-2/documentation/examples/prometheus-kubernetes.yml
--- 2.31.2+ds1-1/documentation/examples/prometheus-kubernetes.yml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/examples/prometheus-kubernetes.yml	2022-03-08 16:34:32.000000000 +0000
@@ -185,10 +185,10 @@ scrape_configs:
         regex: __meta_kubernetes_service_label_(.+)
       - source_labels: [__meta_kubernetes_namespace]
         action: replace
-        target_label: kubernetes_namespace
+        target_label: namespace
       - source_labels: [__meta_kubernetes_service_name]
         action: replace
-        target_label: kubernetes_name
+        target_label: service
 
   # Example scrape config for probing services via the Blackbox Exporter.
   #
@@ -217,9 +217,9 @@ scrape_configs:
       - action: labelmap
         regex: __meta_kubernetes_service_label_(.+)
       - source_labels: [__meta_kubernetes_namespace]
-        target_label: kubernetes_namespace
+        target_label: namespace
       - source_labels: [__meta_kubernetes_service_name]
-        target_label: kubernetes_name
+        target_label: service
 
   # Example scrape config for probing ingresses via the Blackbox Exporter.
   #
@@ -255,9 +255,9 @@ scrape_configs:
       - action: labelmap
         regex: __meta_kubernetes_ingress_label_(.+)
       - source_labels: [__meta_kubernetes_namespace]
-        target_label: kubernetes_namespace
+        target_label: namespace
       - source_labels: [__meta_kubernetes_ingress_name]
-        target_label: kubernetes_name
+        target_label: ingress
 
   # Example scrape config for pods
   #
@@ -294,7 +294,7 @@ scrape_configs:
         regex: __meta_kubernetes_pod_label_(.+)
       - source_labels: [__meta_kubernetes_namespace]
         action: replace
-        target_label: kubernetes_namespace
+        target_label: namespace
       - source_labels: [__meta_kubernetes_pod_name]
         action: replace
-        target_label: kubernetes_pod_name
+        target_label: pod
diff -pruN 2.31.2+ds1-1/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go 2.33.5+ds1-2/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go
--- 2.31.2+ds1-1/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/examples/remote_storage/remote_storage_adapter/graphite/client.go	2022-03-08 16:34:32.000000000 +0000
@@ -37,7 +37,7 @@ type Client struct {
 }
 
 // NewClient creates a new Client.
-func NewClient(logger log.Logger, address string, transport string, timeout time.Duration, prefix string) *Client {
+func NewClient(logger log.Logger, address, transport string, timeout time.Duration, prefix string) *Client {
 	if logger == nil {
 		logger = log.NewNopLogger()
 	}
diff -pruN 2.31.2+ds1-1/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go 2.33.5+ds1-2/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go
--- 2.31.2+ds1-1/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/examples/remote_storage/remote_storage_adapter/graphite/client_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -20,13 +20,11 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-var (
-	metric = model.Metric{
-		model.MetricNameLabel: "test:metric",
-		"testlabel":           "test:value",
-		"many_chars":          "abc!ABC:012-3!45ö67~89./(){},=.\"\\",
-	}
-)
+var metric = model.Metric{
+	model.MetricNameLabel: "test:metric",
+	"testlabel":           "test:value",
+	"many_chars":          "abc!ABC:012-3!45ö67~89./(){},=.\"\\",
+}
 
 func TestEscape(t *testing.T) {
 	// Can we correctly keep and escape valid chars.
diff -pruN 2.31.2+ds1-1/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go 2.33.5+ds1-2/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go
--- 2.31.2+ds1-1/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/examples/remote_storage/remote_storage_adapter/influxdb/client.go	2022-03-08 16:34:32.000000000 +0000
@@ -41,7 +41,7 @@ type Client struct {
 }
 
 // NewClient creates a new Client.
-func NewClient(logger log.Logger, conf influx.HTTPConfig, db string, rp string) *Client {
+func NewClient(logger log.Logger, conf influx.HTTPConfig, db, rp string) *Client {
 	c, err := influx.NewHTTPClient(conf)
 	// Currently influx.NewClient() *should* never return an error.
 	if err != nil {
diff -pruN 2.31.2+ds1-1/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go 2.33.5+ds1-2/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go
--- 2.31.2+ds1-1/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/examples/remote_storage/remote_storage_adapter/opentsdb/client_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -21,13 +21,11 @@ import (
 	"github.com/stretchr/testify/require"
 )
 
-var (
-	metric = model.Metric{
-		model.MetricNameLabel: "test:metric",
-		"testlabel":           "test:value",
-		"many_chars":          "abc!ABC:012-3!45ö67~89./",
-	}
-)
+var metric = model.Metric{
+	model.MetricNameLabel: "test:metric",
+	"testlabel":           "test:value",
+	"many_chars":          "abc!ABC:012-3!45ö67~89./",
+}
 
 func TestTagsFromMetric(t *testing.T) {
 	expected := map[string]TagValue{
diff -pruN 2.31.2+ds1-1/documentation/prometheus-mixin/alerts.libsonnet 2.33.5+ds1-2/documentation/prometheus-mixin/alerts.libsonnet
--- 2.31.2+ds1-1/documentation/prometheus-mixin/alerts.libsonnet	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/prometheus-mixin/alerts.libsonnet	2022-03-08 16:34:32.000000000 +0000
@@ -391,7 +391,7 @@
                 and
                   ( 
                     count by (%(prometheusHAGroupLabels)s) (
-                      changes(process_start_time_seconds{%(prometheusSelector)s}[30m]) > 1
+                      changes(process_start_time_seconds{%(prometheusSelector)s}[1h]) > 1
                     ) 
                     / 
                     count by (%(prometheusHAGroupLabels)s) (
@@ -418,7 +418,7 @@
             },
             annotations: {
               summary: 'More than half of the Prometheus instances within the same HA group are crashlooping.',
-              description: '{{ $value | humanizePercentage }} of Prometheus instances within the %(prometheusHAGroupName)s HA group have had at least 5 total restarts or 2 unclean restarts in the last 30m.' % $._config,
+              description: '{{ $value | humanizePercentage }} of Prometheus instances within the %(prometheusHAGroupName)s HA group have had at least 5 total restarts in the last 30m or 2 unclean restarts in the last 1h.' % $._config,
             },
           },
         ],
diff -pruN 2.31.2+ds1-1/documentation/prometheus-mixin/dashboards.libsonnet 2.33.5+ds1-2/documentation/prometheus-mixin/dashboards.libsonnet
--- 2.31.2+ds1-1/documentation/prometheus-mixin/dashboards.libsonnet	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/prometheus-mixin/dashboards.libsonnet	2022-03-08 16:34:32.000000000 +0000
@@ -312,9 +312,9 @@ local template = grafana.template;
       )
       .addTemplate(
         template.new(
-          'instance',
+          'cluster',
           '$datasource',
-          'label_values(prometheus_build_info, instance)' % $._config,
+          'label_values(kube_pod_container_info{image=~".*prometheus.*"}, cluster)' % $._config,
           refresh='time',
           current={
             selected: true,
@@ -326,9 +326,9 @@ local template = grafana.template;
       )
       .addTemplate(
         template.new(
-          'cluster',
+          'instance',
           '$datasource',
-          'label_values(kube_pod_container_info{image=~".*prometheus.*"}, cluster)' % $._config,
+          'label_values(prometheus_build_info{cluster=~"$cluster"}, instance)' % $._config,
           refresh='time',
           current={
             selected: true,
diff -pruN 2.31.2+ds1-1/documentation/prometheus-mixin/go.mod 2.33.5+ds1-2/documentation/prometheus-mixin/go.mod
--- 2.31.2+ds1-1/documentation/prometheus-mixin/go.mod	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/prometheus-mixin/go.mod	1970-01-01 00:00:00.000000000 +0000
@@ -1,8 +0,0 @@
-module github.com/prometheus/prometheus/documentation/prometheus-mixin
-
-go 1.15
-
-require (
-	github.com/google/go-jsonnet v0.16.0
-	github.com/jsonnet-bundler/jsonnet-bundler v0.4.0
-)
diff -pruN 2.31.2+ds1-1/documentation/prometheus-mixin/go.sum 2.33.5+ds1-2/documentation/prometheus-mixin/go.sum
--- 2.31.2+ds1-1/documentation/prometheus-mixin/go.sum	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/prometheus-mixin/go.sum	1970-01-01 00:00:00.000000000 +0000
@@ -1,49 +0,0 @@
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
-github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
-github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
-github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
-github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
-github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
-github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
-github.com/google/go-jsonnet v0.16.0 h1:Nb4EEOp+rdeGGyB1rQ5eisgSAqrTnhf9ip+X6lzZbY0=
-github.com/google/go-jsonnet v0.16.0/go.mod h1:sOcuej3UW1vpPTZOr8L7RQimqai1a57bt5j22LzGZCw=
-github.com/jsonnet-bundler/jsonnet-bundler v0.4.0 h1:4BKZ6LDqPc2wJDmaKnmYD/vDjUptJtnUpai802MibFc=
-github.com/jsonnet-bundler/jsonnet-bundler v0.4.0/go.mod h1:/by7P/OoohkI3q4CgSFqcoFsVY+IaNbzOVDknEsKDeU=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
-github.com/mattn/go-colorable v0.1.4 h1:snbPLB8fVfU9iwbbo30TPtbLRzwWu6aJS6Xh4eaaviA=
-github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
-github.com/mattn/go-isatty v0.0.6/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
-github.com/mattn/go-isatty v0.0.11 h1:FxPOTFNqGkuDUGi3H/qkUbQO4ZiBa2brKq5r0l8TGeM=
-github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
-github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw=
-github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0=
-github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190310054646-10058d7d4faa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037 h1:YyJpGZS1sBuBCzLAR1VEpK193GlqGZbnPFnPV/5Rsb4=
-golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
-gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
-gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
-gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
diff -pruN 2.31.2+ds1-1/documentation/prometheus-mixin/Makefile 2.33.5+ds1-2/documentation/prometheus-mixin/Makefile
--- 2.31.2+ds1-1/documentation/prometheus-mixin/Makefile	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/prometheus-mixin/Makefile	2022-03-08 16:34:32.000000000 +0000
@@ -21,5 +21,9 @@ lint: prometheus_alerts.yaml
 
 	promtool check rules prometheus_alerts.yaml
 
+.PHONY: jb_install
+jb_install:
+	jb install
+
 clean:
 	rm -rf dashboards_out prometheus_alerts.yaml
diff -pruN 2.31.2+ds1-1/documentation/prometheus-mixin/tools.go 2.33.5+ds1-2/documentation/prometheus-mixin/tools.go
--- 2.31.2+ds1-1/documentation/prometheus-mixin/tools.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/documentation/prometheus-mixin/tools.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-// Copyright 2020 The prometheus Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build tools
-// +build tools
-
-// Package tools tracks dependencies for tools that used in the build process.
-// See https://github.com/golang/go/issues/25922
-package tools
-
-import (
-	_ "github.com/google/go-jsonnet/cmd/jsonnet"
-	_ "github.com/google/go-jsonnet/cmd/jsonnetfmt"
-	_ "github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb"
-)
diff -pruN 2.31.2+ds1-1/.github/dependabot.yml 2.33.5+ds1-2/.github/dependabot.yml
--- 2.31.2+ds1-1/.github/dependabot.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/.github/dependabot.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,18 @@
+version: 2
+updates:
+  - package-ecosystem: "gomod"
+    directory: "/"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "npm"
+    directory: "/web/ui"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "github-actions"
+    directory: "/"
+    schedule:
+      interval: "monthly"
+  - package-ecosystem: "docker"
+    directory: "/"
+    schedule:
+      interval: "monthly"
diff -pruN 2.31.2+ds1-1/.github/lock.yml 2.33.5+ds1-2/.github/lock.yml
--- 2.31.2+ds1-1/.github/lock.yml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/.github/lock.yml	1970-01-01 00:00:00.000000000 +0000
@@ -1,35 +0,0 @@
-# Configuration for Lock Threads - https://github.com/dessant/lock-threads
-
-# Number of days of inactivity before a closed issue or pull request is locked
-daysUntilLock: 180
-
-# Skip issues and pull requests created before a given timestamp. Timestamp must
-# follow ISO 8601 (`YYYY-MM-DD`). Set to `false` to disable
-skipCreatedBefore: false
-
-# Issues and pull requests with these labels will be ignored. Set to `[]` to disable
-exemptLabels: []
-
-# Label to add before locking, such as `outdated`. Set to `false` to disable
-lockLabel: false
-
-# Comment to post before locking. Set to `false` to disable
-lockComment: false
-
-# Assign `resolved` as the reason for locking. Set to `false` to disable
-setLockReason: false
-
-# Limit to only `issues` or `pulls`
-only: issues
-
-# Optionally, specify configuration settings just for `issues` or `pulls`
-# issues:
-#   exemptLabels:
-#     - help-wanted
-#   lockLabel: outdated
-
-# pulls:
-#   daysUntilLock: 30
-
-# Repository to extend settings from
-# _extends: repo
diff -pruN 2.31.2+ds1-1/.github/workflows/fuzzing.yml 2.33.5+ds1-2/.github/workflows/fuzzing.yml
--- 2.31.2+ds1-1/.github/workflows/fuzzing.yml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/.github/workflows/fuzzing.yml	2022-03-08 16:34:32.000000000 +0000
@@ -22,7 +22,7 @@ jobs:
           fuzz-seconds: 600
           dry-run: false
       - name: Upload Crash
-        uses: actions/upload-artifact@v1
+        uses: actions/upload-artifact@v2.3.1
         if: failure() && steps.build.outcome == 'success'
         with:
           name: artifacts
diff -pruN 2.31.2+ds1-1/.github/workflows/golangci-lint.yml 2.33.5+ds1-2/.github/workflows/golangci-lint.yml
--- 2.31.2+ds1-1/.github/workflows/golangci-lint.yml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/.github/workflows/golangci-lint.yml	2022-03-08 16:34:32.000000000 +0000
@@ -7,6 +7,7 @@ on:
       - "**.go"
       - "scripts/errcheck_excludes.txt"
       - ".github/workflows/golangci-lint.yml"
+      - ".golangci.yml"
   pull_request:
     paths:
       - "go.sum"
@@ -14,6 +15,7 @@ on:
       - "**.go"
       - "scripts/errcheck_excludes.txt"
       - ".github/workflows/golangci-lint.yml"
+      - ".golangci.yml"
 
 jobs:
   golangci:
diff -pruN 2.31.2+ds1-1/.github/workflows/lock.yml 2.33.5+ds1-2/.github/workflows/lock.yml
--- 2.31.2+ds1-1/.github/workflows/lock.yml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/.github/workflows/lock.yml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,22 @@
+name: 'Lock Threads'
+
+on:
+  schedule:
+    - cron: '13 23 * * *'
+  workflow_dispatch:
+
+permissions:
+  issues: write
+
+concurrency:
+  group: lock
+
+jobs:
+  action:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: dessant/lock-threads@v3
+        with:
+          process-only: 'issues'
+          issue-inactive-days: '180'
+          github-token: ${{ secrets.PROMBOT_LOCKTHREADS_TOKEN }}
diff -pruN 2.31.2+ds1-1/.gitignore 2.33.5+ds1-2/.gitignore
--- 2.31.2+ds1-1/.gitignore	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/.gitignore	2022-03-08 16:34:32.000000000 +0000
@@ -8,7 +8,9 @@
 /promtool
 benchmark.txt
 /data
+/data-agent
 /cmd/prometheus/data
+/cmd/prometheus/data-agent
 /cmd/prometheus/debug
 /benchout
 /cmd/promtool/data
diff -pruN 2.31.2+ds1-1/.golangci.yml 2.33.5+ds1-2/.golangci.yml
--- 2.31.2+ds1-1/.golangci.yml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/.golangci.yml	2022-03-08 16:34:32.000000000 +0000
@@ -1,10 +1,18 @@
 run:
   deadline: 5m
+  skip-files:
+    # Skip autogenerated files.
+    - ^.*\.(pb|y)\.go$
+
+output:
+  sort-results: true
 
 linters:
   enable:
     - depguard
-    - golint
+    - gofumpt
+    - goimports
+    - revive
 
 issues:
   exclude-rules:
@@ -25,3 +33,7 @@ linters-settings:
       - github.com/go-kit/kit/log: "Use github.com/go-kit/log instead of github.com/go-kit/kit/log"
   errcheck:
     exclude: scripts/errcheck_excludes.txt
+  goimports:
+    local-prefixes: github.com/prometheus/prometheus
+  gofumpt:
+    extra-rules: true
diff -pruN 2.31.2+ds1-1/go.mod 2.33.5+ds1-2/go.mod
--- 2.31.2+ds1-1/go.mod	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/go.mod	2022-03-08 16:34:32.000000000 +0000
@@ -1,43 +1,42 @@
 module github.com/prometheus/prometheus
 
-go 1.14
+go 1.16
 
 require (
-	github.com/Azure/azure-sdk-for-go v58.2.0+incompatible
-	github.com/Azure/go-autorest/autorest v0.11.21
-	github.com/Azure/go-autorest/autorest/adal v0.9.16
+	github.com/Azure/azure-sdk-for-go v61.1.0+incompatible
+	github.com/Azure/go-autorest/autorest v0.11.23
+	github.com/Azure/go-autorest/autorest/adal v0.9.18
 	github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
 	github.com/Azure/go-autorest/autorest/validation v0.3.1 // indirect
-	github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a
-	github.com/aws/aws-sdk-go v1.41.7
+	github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137
+	github.com/aws/aws-sdk-go v1.42.31
 	github.com/cespare/xxhash/v2 v2.1.2
-	github.com/containerd/containerd v1.5.4 // indirect
+	github.com/containerd/containerd v1.6.1 // indirect
 	github.com/dennwc/varint v1.0.0
 	github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245
-	github.com/digitalocean/godo v1.69.1
-	github.com/docker/docker v20.10.9+incompatible
-	github.com/docker/go-connections v0.4.0 // indirect
-	github.com/edsrzf/mmap-go v1.0.0
-	github.com/envoyproxy/go-control-plane v0.9.9
-	github.com/envoyproxy/protoc-gen-validate v0.6.1
+	github.com/digitalocean/godo v1.73.0
+	github.com/docker/docker v20.10.12+incompatible
+	github.com/edsrzf/mmap-go v1.1.0
+	github.com/envoyproxy/go-control-plane v0.10.1
+	github.com/envoyproxy/protoc-gen-validate v0.6.2
+	github.com/fsnotify/fsnotify v1.5.1
 	github.com/go-kit/log v0.2.0
 	github.com/go-logfmt/logfmt v0.5.1
-	github.com/go-openapi/strfmt v0.20.3
+	github.com/go-openapi/strfmt v0.21.1
 	github.com/go-zookeeper/zk v1.0.2
 	github.com/gogo/protobuf v1.3.2
 	github.com/golang/snappy v0.0.4
-	github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0
-	github.com/gophercloud/gophercloud v0.22.0
+	github.com/google/pprof v0.0.0-20211214055906-6f57359322fd
+	github.com/gophercloud/gophercloud v0.24.0
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0
-	github.com/hashicorp/consul/api v1.11.0
-	github.com/hetznercloud/hcloud-go v1.32.0
+	github.com/hashicorp/consul/api v1.12.0
+	github.com/hetznercloud/hcloud-go v1.33.1
 	github.com/influxdata/influxdb v1.9.5
 	github.com/json-iterator/go v1.1.12
 	github.com/kolo/xmlrpc v0.0.0-20201022064351-38db28db192b
-	github.com/linode/linodego v1.1.0
-	github.com/miekg/dns v1.1.43
-	github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 // indirect
-	github.com/morikuni/aec v1.0.0 // indirect
+	github.com/linode/linodego v1.2.1
+	github.com/miekg/dns v1.1.45
+	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 // indirect
 	github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f
 	github.com/oklog/run v1.1.0
 	github.com/oklog/ulid v1.3.1
@@ -45,37 +44,36 @@ require (
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/alertmanager v0.23.0
-	github.com/prometheus/client_golang v1.11.0
+	github.com/prometheus/client_golang v1.12.1
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.32.1
 	github.com/prometheus/common/sigv4 v0.1.0
-	github.com/prometheus/exporter-toolkit v0.7.0
+	github.com/prometheus/exporter-toolkit v0.7.1
 	github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44
 	github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749
 	github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546
 	github.com/stretchr/testify v1.7.0
-	github.com/uber/jaeger-client-go v2.29.1+incompatible
+	github.com/uber/jaeger-client-go v2.30.0+incompatible
 	github.com/uber/jaeger-lib v2.4.1+incompatible
 	go.uber.org/atomic v1.9.0
 	go.uber.org/goleak v1.1.12
-	golang.org/x/net v0.0.0-20211020060615-d418f374d309
-	golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1
+	golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98
+	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
-	golang.org/x/sys v0.0.0-20211020174200-9d6173849985
-	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac
-	golang.org/x/tools v0.1.7
-	google.golang.org/api v0.59.0
-	google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a
+	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+	golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
+	golang.org/x/tools v0.1.9-0.20211209172050-90a85b2969be
+	google.golang.org/api v0.64.0
+	google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb
 	google.golang.org/protobuf v1.27.1
 	gopkg.in/alecthomas/kingpin.v2 v2.2.6
-	gopkg.in/fsnotify/fsnotify.v1 v1.4.7
 	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
-	k8s.io/api v0.22.2
-	k8s.io/apimachinery v0.22.2
-	k8s.io/client-go v0.22.2
+	k8s.io/api v0.22.5
+	k8s.io/apimachinery v0.22.5
+	k8s.io/client-go v0.22.5
 	k8s.io/klog v1.0.0
-	k8s.io/klog/v2 v2.20.0
+	k8s.io/klog/v2 v2.40.1
 )
 
 replace (
diff -pruN 2.31.2+ds1-1/go.sum 2.33.5+ds1-2/go.sum
--- 2.31.2+ds1-1/go.sum	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/go.sum	2022-03-08 16:34:32.000000000 +0000
@@ -1,4 +1,5 @@
 bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
+bazil.org/fuse v0.0.0-20200407214033-5883e5a4b512/go.mod h1:FbcW6z/2VytnFDhZfumh8Ss8zxHE6qpMP5sHTRe0EaM=
 cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
 cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
@@ -26,8 +27,9 @@ cloud.google.com/go v0.87.0/go.mod h1:Tp
 cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
 cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
 cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
-cloud.google.com/go v0.97.0 h1:3DXvAyifywvq64LfkKaMOmkWPS1CikIQdMe2lY9vxU8=
 cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
+cloud.google.com/go v0.99.0 h1:y/cM2iqGgGi5D5DQZl6D9STN/3dR/Vx5Mp8s752oJTY=
+cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
@@ -38,6 +40,7 @@ cloud.google.com/go/bigtable v1.2.0/go.m
 cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
+cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
 cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
 cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@@ -49,12 +52,15 @@ cloud.google.com/go/storage v1.8.0/go.mo
 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
 collectd.org v0.3.0/go.mod h1:A/8DzQBkF6abtvrT2j/AU/4tiBgJWYyh0y/oB/4MlWE=
 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20210715213245-6c3934b029d8/go.mod h1:CzsSbkDixRphAF5hS6wbMKq0eI6ccJRb7/A0M6JBnwg=
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v41.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v58.2.0+incompatible h1:iCb2tuoEm3N7ZpUDOvu1Yxl1B3iOVDmaD6weaRuIPzs=
-github.com/Azure/azure-sdk-for-go v58.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8=
+github.com/Azure/azure-sdk-for-go v61.1.0+incompatible h1:Qbz3jdfkXIPjZECEuk2E7i3iLhC9Ul74pG5mQRQC+z4=
+github.com/Azure/azure-sdk-for-go v61.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs=
 github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -63,8 +69,8 @@ github.com/Azure/go-autorest/autorest v0
 github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
 github.com/Azure/go-autorest/autorest v0.11.9/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
 github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
-github.com/Azure/go-autorest/autorest v0.11.21 h1:w77zY/9RnUAWcIQyDC0Fc89mCvwftR8F+zsR/OH6enk=
-github.com/Azure/go-autorest/autorest v0.11.21/go.mod h1:Do/yuMSW/13ayUkcVREpsMHGG+MvV81uzSCFgYPj4tM=
+github.com/Azure/go-autorest/autorest v0.11.23 h1:bRQWsW25/YkoxnIqXMPF94JW33qWDcrPMZ3bINaAruU=
+github.com/Azure/go-autorest/autorest v0.11.23/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
 github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q=
@@ -72,8 +78,8 @@ github.com/Azure/go-autorest/autorest/ad
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
 github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
-github.com/Azure/go-autorest/autorest/adal v0.9.16 h1:P8An8Z9rH1ldbOLdFpxYorgOt2sywL9V24dAwWHPuGc=
-github.com/Azure/go-autorest/autorest/adal v0.9.16/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
+github.com/Azure/go-autorest/autorest/adal v0.9.18 h1:kLnPsRjzZZUF3K5REu/Kc+qMQrvuza2bwSnNdhmzLfQ=
+github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.3/go.mod h1:4bJZhUhcq8LB20TruwHbAQsmUs2Xh+QR7utuJpLXX3A=
 github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
@@ -116,8 +122,9 @@ github.com/Microsoft/go-winio v0.4.16-0.
 github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0=
 github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
-github.com/Microsoft/go-winio v0.4.17 h1:iT12IBVClFevaf8PuVyi3UmZOVh4OqnaLxDTW2O6j3w=
 github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
+github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7/go.mod h1:OHd7sQqRFrYd3RmSgbgji+ctCwkbq2wbEYNSzOYtcBQ=
@@ -125,10 +132,14 @@ github.com/Microsoft/hcsshim v0.8.9/go.m
 github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2ow3VK6a9Lg=
 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00=
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
-github.com/Microsoft/hcsshim v0.8.18/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.2/go.mod h1:7pLA8lDk46WKDWlVsENo92gC0XFa8rbKfyFRBqxEbCc=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
+github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
 github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
@@ -152,9 +163,10 @@ github.com/alecthomas/units v0.0.0-20151
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
 github.com/alecthomas/units v0.0.0-20210208195552-ff826a37aa15/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
-github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a h1:E/8AP5dFtMhl5KPJz66Kt9G0n+7Sn41Fy1wv9/jHOrc=
-github.com/alecthomas/units v0.0.0-20210927113745-59d0afb8317a/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc=
+github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
 github.com/alexflint/go-filemutex v0.0.0-20171022225611-72bdc8eae2ae/go.mod h1:CgnQgUtFrFz9mxFNtED3jI5tLDjKlOM+oUF/sTk6ps0=
+github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk=
 github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
 github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q=
 github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
@@ -186,9 +198,10 @@ github.com/aws/aws-sdk-go v1.30.12/go.mo
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
-github.com/aws/aws-sdk-go v1.41.7 h1:vlpR8Cky3ZxUVNINgeRZS6N0p6zmFvu/ZqRRwrTI25U=
-github.com/aws/aws-sdk-go v1.41.7/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
+github.com/aws/aws-sdk-go v1.42.31 h1:tSv/YzjrFlbSqWmov9quBxrSNXLPUjJI7nPEB57S1+M=
+github.com/aws/aws-sdk-go v1.42.31/go.mod h1:OGr6lGMAKGlG9CVrYnWYDKIyb829c6EVBRjxqjmPepc=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
+github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
 github.com/benbjohnson/immutable v0.2.1/go.mod h1:uc6OHo6PN2++n98KHLxW8ef4W42ylHiQSENghE1ezxI=
 github.com/benbjohnson/tmpl v1.0.0/go.mod h1:igT620JFIi44B6awvU9IsDhR77IXWtFigTLil/RPdps=
 github.com/beorn7/perks v0.0.0-20160804104726-4c0e84591b9a/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -198,6 +211,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKn
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
+github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
+github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
 github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
@@ -206,6 +221,7 @@ github.com/boltdb/bolt v1.3.1/go.mod h1:
 github.com/bonitoo-io/go-sql-bigquery v0.3.4-1.4.0/go.mod h1:J4Y6YJm0qTWB9aFziB7cPeSyc6dOZFyJdteSeybVpXQ=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
@@ -215,7 +231,10 @@ github.com/casbin/casbin/v2 v2.1.2/go.mo
 github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
 github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
+github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
+github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM=
@@ -223,6 +242,8 @@ github.com/cespare/xxhash/v2 v2.1.1/go.m
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
+github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
+github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -230,6 +251,8 @@ github.com/cilium/ebpf v0.0.0-2020011013
 github.com/cilium/ebpf v0.0.0-20200702112145-1c8d4c9ef775/go.mod h1:7cR51M8ViRLIdUjrmSXlK9pkrsDlLHbO8jiB8X8JnOc=
 github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs=
 github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.6.2/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs=
+github.com/cilium/ebpf v0.7.0/go.mod h1:/oI2+1shJiTGAMgl6/RgJr36Eo1jzrRcAWbcXO2usCA=
 github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
 github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
 github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE=
@@ -237,9 +260,17 @@ github.com/client9/misspell v0.3.4/go.mo
 github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
 github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
 github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
-github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c=
+github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
 github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw=
+github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
+github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
+github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
+github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
 github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
 github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
@@ -255,11 +286,13 @@ github.com/containerd/cgroups v0.0.0-202
 github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
 github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
 github.com/containerd/console v1.0.1/go.mod h1:XUsP6YE/mKtz6bxc+I8UiKKTP04qjQL4qcS3XoQ5xkw=
 github.com/containerd/console v1.0.2/go.mod h1:ytZPjGgY2oeTkAONYafi2kSj0aYggsf8acV1PGKCbzQ=
+github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
 github.com/containerd/containerd v1.2.10/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0-beta.2.0.20190828155532-0293cbd26c69/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.3.0/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
@@ -268,13 +301,16 @@ github.com/containerd/containerd v1.3.2/
 github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
 github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
-github.com/containerd/containerd v1.5.4 h1:uPF0og3ByFzDnaStfiQj3fVGTEtaSNyU+bW7GR/nqGA=
-github.com/containerd/containerd v1.5.4/go.mod h1:sx18RgvW6ABJ4iYUw7Q5x7bgFOAB9B6G7+yO0XBc4zw=
+github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
+github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -282,6 +318,7 @@ github.com/containerd/continuity v0.0.0-
 github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
 github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
 github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
+github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
 github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
 github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
@@ -290,6 +327,8 @@ github.com/containerd/fifo v0.0.0-202103
 github.com/containerd/fifo v1.0.0/go.mod h1:ocF/ME1SX5b1AOlWi9r677YJmCPSwwWnQ9O123vzpE4=
 github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZHtSlv++smU=
 github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
+github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
+github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
 github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
 github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
 github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -299,14 +338,17 @@ github.com/containerd/imgcrypt v1.0.1/go
 github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6TNsg0ctmizkrOgXRNQjAPFWpMYRWuiB6dSF4Pfa5SA=
 github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
 github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
+github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
 github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
 github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
 github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
@@ -319,15 +361,20 @@ github.com/containerd/zfs v1.0.0/go.mod
 github.com/containernetworking/cni v0.7.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v0.8.0/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
 github.com/containernetworking/cni v0.8.1/go.mod h1:LGwApLUm2FpoOfxTDEeq8T9ipbpZ61X79hmU3w8FmsY=
+github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtrBI6QcRV0NiNt15Y=
 github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
 github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
+github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
 github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
 github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
 github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
+github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
 github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
 github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
+github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
 github.com/coreos/go-iptables v0.4.5/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
 github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmekav8Dbxlm1MU=
+github.com/coreos/go-iptables v0.6.0/go.mod h1:Qe8Bv2Xik5FyTXwgIbLAnv2sWSBmvWdFETJConOQ//Q=
 github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
@@ -336,16 +383,17 @@ github.com/coreos/go-systemd v0.0.0-2018
 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
 github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
+github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
-github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
@@ -366,17 +414,20 @@ github.com/dgryski/go-sip13 v0.0.0-20181
 github.com/dgryski/go-sip13 v0.0.0-20190329191031-25c5027a8c7b/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245 h1:9cOfvEwjQxdwKuNDTQSaMKNRvwKwgZG+U4HrjeRKHso=
 github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
-github.com/digitalocean/godo v1.69.1 h1:aCyfwth8R3DeOaWB9J9E8v7cjlDIlF19eXTt8R3XhTE=
-github.com/digitalocean/godo v1.69.1/go.mod h1:epPuOzTOOJujNo0nduDj2D5O1zu8cSpp9R+DdN0W9I0=
+github.com/digitalocean/godo v1.73.0 h1:VEPb2YIgvbG5WP9+2Yin6ty+1s01mTUrSEW4CO6alVc=
+github.com/digitalocean/godo v1.73.0/go.mod h1:GBmu8MkjZmNARE7IXRPmkbbnocNN8+uBm0xbEVw2LCs=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dnaeon/go-vcr v1.0.1 h1:r8L/HqC0Hje5AXMu1ooW8oyQyOFv4GxqpL0nRP7SLLY=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v20.10.9+incompatible h1:JlsVnETOjM2RLQa0Cc1XCIspUdXW3Zenq9P54uXBm6k=
-github.com/docker/docker v20.10.9+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U=
+github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
@@ -395,8 +446,9 @@ github.com/eapache/go-resiliency v1.1.0/
 github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
 github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
-github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
 github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
+github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ=
+github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q=
 github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
 github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
@@ -409,11 +461,12 @@ github.com/envoyproxy/go-control-plane v
 github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
 github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
-github.com/envoyproxy/go-control-plane v0.9.9 h1:vQLjymTobffN2R0F8eTqw6q7iozfRO5Z0m+/4Vw+/uA=
-github.com/envoyproxy/go-control-plane v0.9.9/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
+github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
+github.com/envoyproxy/go-control-plane v0.10.1 h1:cgDRLG7bs59Zd+apAWuzLQL95obVYAymNJek76W3mgw=
+github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
-github.com/envoyproxy/protoc-gen-validate v0.6.1 h1:4CF52PCseTFt4bE+Yk3dIpdVi7XWuPVMhPtm4FaIJPM=
-github.com/envoyproxy/protoc-gen-validate v0.6.1/go.mod h1:txg5va2Qkip90uYoSKH+nkAAmXrb2j3iq4FLwdrCbXQ=
+github.com/envoyproxy/protoc-gen-validate v0.6.2 h1:JiO+kJTpmYGjEodY7O1Zk8oZcNz1+f30UtwtXoFUPzE=
+github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws=
 github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
@@ -421,6 +474,7 @@ github.com/evanphx/json-patch v4.11.0+in
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
+github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
 github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
 github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
@@ -429,11 +483,13 @@ github.com/franela/goblin v0.0.0-2020010
 github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
 github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
 github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
-github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
 github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
+github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
 github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
+github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
 github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
 github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk=
 github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
@@ -462,6 +518,12 @@ github.com/go-logfmt/logfmt v0.5.0/go.mo
 github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
 github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
+github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.1/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.0/go.mod h1:YkVgnZu1ZjjL7xTxrfm/LLZBfkhTqSR1ydtm6jTKKwI=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
 github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
 github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -540,8 +602,8 @@ github.com/go-openapi/strfmt v0.19.5/go.
 github.com/go-openapi/strfmt v0.19.11/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
 github.com/go-openapi/strfmt v0.20.0/go.mod h1:UukAYgTaQfqJuAFlNxxMWNvMYiwiXtLsF2VwmoFtbtc=
 github.com/go-openapi/strfmt v0.20.1/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
-github.com/go-openapi/strfmt v0.20.3 h1:YVG4ZgPZ00km/lRHrIf7c6cKL5/4FAUtG2T9RxWAgDY=
-github.com/go-openapi/strfmt v0.20.3/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicATpEfZwHUNk=
+github.com/go-openapi/strfmt v0.21.1 h1:G6s2t5V5kGCHLVbSdZ/6lI8Wm4OzoPFkc3/cjAsKQrM=
+github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
 github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
 github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
@@ -573,6 +635,7 @@ github.com/go-sql-driver/mysql v1.4.1/go
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
 github.com/go-zookeeper/zk v1.0.2 h1:4mx0EYENAdX/B/rbunjlt5+4RTA/a9SMHBRuSKdGxPM=
 github.com/go-zookeeper/zk v1.0.2/go.mod h1:nOB03cncLtlp4t+UAkGSV+9beXP/akpekBwL+UX1Qcw=
 github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0=
@@ -603,6 +666,8 @@ github.com/godbus/dbus v0.0.0-2015110517
 github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godbus/dbus/v5 v5.0.6/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
@@ -686,11 +751,13 @@ github.com/google/go-cmp v0.5.4/go.mod h
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
-github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
 github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
+github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
+github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -710,8 +777,8 @@ github.com/google/pprof v0.0.0-202102260
 github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
-github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0 h1:zHs+jv3LO743/zFGcByu2KmpbliCU2AhjcGgrdTwSG4=
-github.com/google/pprof v0.0.0-20211008130755-947d60d73cc0/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
+github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
+github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -731,8 +798,8 @@ github.com/googleapis/gnostic v0.5.5 h1:
 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
 github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
 github.com/gophercloud/gophercloud v0.10.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss=
-github.com/gophercloud/gophercloud v0.22.0 h1:9lFISNLafZcecT0xUveIMt3IafexC6DIV9ek1SZdSMw=
-github.com/gophercloud/gophercloud v0.22.0/go.mod h1:wRtmUelyIIv3CSSDI47aUwbs075O6i+LY+pXsKCBsb4=
+github.com/gophercloud/gophercloud v0.24.0 h1:jDsIMGJ1KZpAjYfQgGI2coNQj5Q83oPzuiGJRFWgMzw=
+github.com/gophercloud/gophercloud v0.24.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
@@ -747,23 +814,27 @@ github.com/gorilla/websocket v1.4.2/go.m
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
+github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
 github.com/grpc-ecosystem/grpc-gateway v1.14.4/go.mod h1:6CwZWGDSPRJidgKAtJVvND6soZe6fT7iteq8wDPdhb0=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
 github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE=
 github.com/hashicorp/consul/api v1.4.0/go.mod h1:xc8u05kyMa3Wjr9eEAsIAo3dg8+LywT5E/Cl7cNS5nU=
-github.com/hashicorp/consul/api v1.11.0 h1:Hw/G8TtRvOElqxVIhBzXciiSTbapq8hZ2XKZsXk5ZCE=
-github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
+github.com/hashicorp/consul/api v1.12.0 h1:k3y1FYv6nuKyNTqj6w9gXOx5r5CfLj/k/euUeBXj1OY=
+github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
+github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
 github.com/hashicorp/consul/sdk v0.4.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM=
 github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU=
 github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
 github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
-github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
+github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
+github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
 github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM=
 github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80=
@@ -777,8 +848,9 @@ github.com/hashicorp/go-msgpack v0.5.3 h
 github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
 github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
 github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
-github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI=
 github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
+github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
+github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
 github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
 github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU=
 github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc=
@@ -800,22 +872,23 @@ github.com/hashicorp/hcl v1.0.0/go.mod h
 github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
 github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
 github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY=
+github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
 github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/memberlist v0.1.4/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I=
 github.com/hashicorp/memberlist v0.2.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
-github.com/hashicorp/memberlist v0.2.4 h1:OOhYzSvFnkFQXm1ysE8RjXTHsqSRDyP4emusC9K7DYg=
 github.com/hashicorp/memberlist v0.2.4/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
+github.com/hashicorp/memberlist v0.3.0 h1:8+567mCcFDnS5ADl7lrpxPMWiFCElyUEeW0gtj34fMA=
+github.com/hashicorp/memberlist v0.3.0/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE=
 github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
 github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU=
-github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM=
-github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk=
-github.com/hetznercloud/hcloud-go v1.32.0 h1:7zyN2V7hMlhm3HZdxOarmOtvzKvkcYKjM0hcwYMQZz0=
-github.com/hetznercloud/hcloud-go v1.32.0/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME=
+github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc=
+github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4=
+github.com/hetznercloud/hcloud-go v1.33.1 h1:W1HdO2bRLTKU4WsyqAasDSpt54fYO4WNckWYfH5AuCQ=
+github.com/hetznercloud/hcloud-go v1.33.1/go.mod h1:XX/TQub3ge0yWR2yHWmnDVIrB+MQbda1pHxkUmDlUME=
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
-github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
+github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho=
 github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
 github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
@@ -823,8 +896,9 @@ github.com/imdario/mergo v0.3.4/go.mod h
 github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
 github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
-github.com/imdario/mergo v0.3.11 h1:3tnifQM4i+fbajXKBHXWEH+KvNHqojZ778UH75j3bGA=
 github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
+github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU=
+github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/influxdata/flux v0.65.0/go.mod h1:BwN2XG2lMszOoquQaFdPET8FRQfrXiZsWmcMO9rkaVY=
 github.com/influxdata/flux v0.131.0/go.mod h1:CKvnYe6FHpTj/E0YGI7TcOZdGiYHoToOPSnoa12RtKI=
@@ -844,7 +918,9 @@ github.com/influxdata/roaring v0.4.13-0.
 github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
 github.com/influxdata/tdigest v0.0.2-0.20210216194612-fc98d27c9e8b/go.mod h1:Z0kXnxzbTC2qrx4NaIzYkE1k66+6oEDQTvL95hQFh5Y=
 github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
+github.com/intel/goresctrl v0.2.0/go.mod h1:+CZdzouYFn5EsxgqAQTEzMfwKwuc0fVdMrT9FCCAVRQ=
 github.com/j-keck/arping v0.0.0-20160618110441-2cf9dc699c56/go.mod h1:ymszkNOg6tORTn+6F6j+Jc8TOr5osrynvN6ivFWZ2GA=
+github.com/j-keck/arping v1.0.2/go.mod h1:aJbELhR92bSk7tp79AWM/ftfc90EfEi2bQJrbBFOsPw=
 github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
 github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4=
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
@@ -855,8 +931,10 @@ github.com/jmespath/go-jmespath v0.4.0 h
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
+github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
 github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
 github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA=
@@ -889,6 +967,7 @@ github.com/klauspost/compress v1.4.0/go.
 github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
+github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
 github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
@@ -901,6 +980,7 @@ github.com/kr/fs v0.1.0/go.mod h1:FFnZGq
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
 github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
+github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
 github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
 github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
 github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
@@ -915,11 +995,13 @@ github.com/leodido/go-urn v1.1.0/go.mod
 github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
-github.com/linode/linodego v1.1.0 h1:ZiFVUptlzuExtUbHZtXiN7I0dAOFQAyirBKb/6/n9n4=
-github.com/linode/linodego v1.1.0/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ=
-github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU=
+github.com/linode/linodego v1.2.1 h1:v0vS/n9dGMA9evG+fhLJcy7hsf6TUVmacfYiYzARhts=
+github.com/linode/linodego v1.2.1/go.mod h1:x/7+BoaKd4unViBmS2umdjYyVAmpFtBtEXZ0wou7FYQ=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lyft/protoc-gen-star v0.5.3/go.mod h1:V0xaHgaf5oCCqmcxYcWiDfTiKsZsRc87/1qhoTACD8w=
 github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
 github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
+github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -951,17 +1033,21 @@ github.com/mattn/go-isatty v0.0.12/go.mo
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
 github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
 github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
 github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
 github.com/miekg/dns v1.1.29/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM=
-github.com/miekg/dns v1.1.43 h1:JKfpVSCB84vrAmHzyrsxB5NAr5kLoMXZArPSw7Qlgyg=
-github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4=
+github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
+github.com/miekg/dns v1.1.45 h1:g5fRIhm9nx7g8osrAvgb16QJfmyMsyOCb+J7LSv+Qzk=
+github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
 github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
 github.com/mileusna/useragent v0.0.0-20190129205925-3e331f0949a5/go.mod h1:JWhYAp2EXqUtsxTKdeGlY8Wp44M7VxThC9FEoNGi2IE=
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
@@ -988,10 +1074,14 @@ github.com/moby/locker v1.0.1/go.mod h1:
 github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
 github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
 github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A=
+github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU=
+github.com/moby/sys/signal v0.6.0/go.mod h1:GQ6ObYZfqacOwTtlXvcmh9A26dVRul/hbOZn88Kg8Tg=
 github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ=
+github.com/moby/sys/symlink v0.2.0/go.mod h1:7uZVF2dqJjG/NsClqul95CqKOBRQyYSNnJ6BMgR/gFs=
 github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk=
-github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc=
+github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6 h1:dcztxKSvZ4Id8iPpHERQBbIJfabdt4wUm5qy3wOL2Zc=
+github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw=
 github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
 github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -1018,10 +1108,10 @@ github.com/nats-io/nkeys v0.1.0/go.mod h
 github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
 github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
-github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
 github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
-github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
 github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
 github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs=
 github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA=
 github.com/oklog/run v1.1.0 h1:GEenZ1cK0+q0+wsJew9qUg/DyD8k3JzYsZAi5gYi2mA=
@@ -1033,20 +1123,27 @@ github.com/onsi/ginkgo v0.0.0-2015120214
 github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
 github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
 github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
-github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
+github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0=
 github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
+github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
 github.com/onsi/gomega v0.0.0-20151007035656-2152b45fa28a/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
 github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
+github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
 github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
 github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
-github.com/onsi/gomega v1.10.3 h1:gph6h/qe9GSUw1NhH1gp+qb+h8rXD8Cy60Z32Qw3ELA=
 github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc=
+github.com/onsi/gomega v1.15.0 h1:WjP/FQ/sk43MRmnEcT+MlDw2TFvkrXlprrPST/IudjU=
+github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0=
 github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
 github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s=
@@ -1055,21 +1152,27 @@ github.com/opencontainers/go-digest v1.0
 github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
 github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
-github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
 github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 h1:q37d91F6BO4Jp1UqWiun0dUFYaqv6WsKTLTCaWv+8LY=
+github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
 github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
 github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
+github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
+github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
 github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-spec v1.0.3-0.20200929063507-e6143ca7d51d/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
+github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
 github.com/opencontainers/selinux v1.6.0/go.mod h1:VVGKuOLlE7v4PJyT6h7mNWvq1rzqiriPsEqVhc+svHE=
 github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3ogry1nUQF8Evvo=
+github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
+github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
 github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis=
 github.com/opentracing-contrib/go-stdlib v0.0.0-20190519235532-cf7a6c988dc9/go.mod h1:PLldrQSroqzH70Xl+1DQcGnefIbqsKR7UDaiux3zV+w=
 github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=
@@ -1094,6 +1197,7 @@ github.com/pelletier/go-toml v1.2.0/go.m
 github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
 github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE=
 github.com/pelletier/go-toml v1.8.1/go.mod h1:T2/BmBdy8dvIRq1a/8aqjN41wvWlN4lrapLU/GW4pbc=
+github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
 github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac=
 github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
 github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
@@ -1128,8 +1232,9 @@ github.com/prometheus/client_golang v1.3
 github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
+github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -1155,8 +1260,8 @@ github.com/prometheus/common v0.32.1/go.
 github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4=
 github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI=
 github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
-github.com/prometheus/exporter-toolkit v0.7.0 h1:XtYeVeeC5daG4txbc9+mieKq+/AK4gtIBLl9Mulrjnk=
-github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
+github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y=
+github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g=
 github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@@ -1169,8 +1274,9 @@ github.com/prometheus/procfs v0.0.8/go.m
 github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
 github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/prometheus v0.0.0-20200609090129-a6600f564e3c/go.mod h1:S5n0C6tSgdnwWshBUceRx5G1OsjLv/EeZ9t3wIfEtsY=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
@@ -1186,15 +1292,19 @@ github.com/russross/blackfriday/v2 v2.0.
 github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
 github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
+github.com/safchain/ethtool v0.0.0-20210803160452-9aa261dae9b1/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4=
 github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E=
 github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/satori/go.uuid v1.2.1-0.20181028125025-b2ce2384e17b/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44 h1:3egqo0Vut6daANFm7tOXdNAa8v5/uLU+sgCJrc88Meo=
 github.com/scaleway/scaleway-sdk-go v1.0.0-beta.7.0.20210223165440-c65ae3540d44/go.mod h1:CJJ5VAbozOl0yEw7nHB9+7BXTJbIn6h7W+f6Gau5IP8=
+github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
+github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
 github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
 github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
+github.com/seccomp/libseccomp-golang v0.9.2-0.20210429002308-3879420cc921/go.mod h1:JA8cRccbGaA1s33RQf7Y1+q9gHmZX1yB/z9WDN1C6fg=
 github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
 github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -1215,23 +1325,26 @@ github.com/sirupsen/logrus v1.4.0/go.mod
 github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
 github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
 github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
-github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
 github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
+github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE=
+github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
 github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
 github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA=
 github.com/snowflakedb/gosnowflake v1.3.13/go.mod h1:6nfka9aTXkUNha1p1cjeeyjDvcyh7jfjp0l8kGpDBok=
 github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM=
+github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0=
 github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY=
 github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
 github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
 github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
 github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4=
-github.com/spf13/afero v1.3.4/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
+github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
 github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
 github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
+github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
 github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
@@ -1240,6 +1353,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:
 github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
 github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
 github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE=
+github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
 github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h1:AO3tvPzVZ/ayst6UlUKUv6rcPQInYe3IknH3jYhAKu8=
 github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
 github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
@@ -1259,6 +1373,7 @@ github.com/stretchr/testify v1.5.1/go.mo
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
 github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
@@ -1269,13 +1384,15 @@ github.com/tinylib/msgp v1.0.2/go.mod h1
 github.com/tinylib/msgp v1.1.0/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
+github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U=
 github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
+github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
 github.com/uber-go/tally v3.3.15+incompatible/go.mod h1:YDTIBxdXyOU/sCWilKB4bgyufu1cEi0jdVnRdxvjnmU=
 github.com/uber/athenadriver v1.1.4/go.mod h1:tQjho4NzXw55LGfSZEcETuYydpY1vtmixUabHkC1K/E=
 github.com/uber/jaeger-client-go v2.23.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
 github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
-github.com/uber/jaeger-client-go v2.29.1+incompatible h1:R9ec3zO3sGpzs0abd43Y+fBZRJ9uiH6lXyR/+u6brW4=
-github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
+github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o=
+github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk=
 github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
 github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg=
 github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U=
@@ -1293,9 +1410,11 @@ github.com/vektah/gqlparser v1.1.2/go.mo
 github.com/vishvananda/netlink v0.0.0-20181108222139-023a6dafdcdf/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk=
 github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
 github.com/vishvananda/netlink v1.1.1-0.20201029203352-d40f9887b852/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
+github.com/vishvananda/netlink v1.1.1-0.20210330154013-f5de75959ad5/go.mod h1:twkDnbuQxJYemMlGd4JFIcuhgX83tXhKS2B/PRMpOho=
 github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI=
 github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
 github.com/vishvananda/netns v0.0.0-20200728191858-db3c7e526aae/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
 github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bitset v1.1.9/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
 github.com/willf/bitset v1.1.11-0.20200630133818-d5bec3311243/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4=
@@ -1318,15 +1437,23 @@ github.com/yuin/goldmark v1.1.27/go.mod
 github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
 github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
-github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
+github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
 github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
 github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
 github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
 go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
 go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
+go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
 go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
 go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
+go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
+go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
+go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ=
+go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0=
+go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE=
+go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc=
+go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4=
 go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
 go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -1336,8 +1463,9 @@ go.mongodb.org/mongo-driver v1.3.4/go.mo
 go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
 go.mongodb.org/mongo-driver v1.4.4/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
 go.mongodb.org/mongo-driver v1.4.6/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc=
-go.mongodb.org/mongo-driver v1.5.1 h1:9nOVLGDfOaZ9R0tBumx/BcuqkbFpyTCU2r/Po7A2azI=
 go.mongodb.org/mongo-driver v1.5.1/go.mod h1:gRXCHX4Jo7J0IJ1oDQyUxF7jfy19UfxniMS4xxMmUqw=
+go.mongodb.org/mongo-driver v1.7.5 h1:ny3p0reEpgsR2cfA5cjgwFZg3Cv/ofFh/8jbhGtz9VI=
+go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng=
 go.mozilla.org/pkcs7 v0.0.0-20200128120323-432b2356ecb1/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk=
 go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
 go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
@@ -1349,26 +1477,50 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiO
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.28.0/go.mod h1:vEhqr0m4eTc+DWxfsXoXue2GBgV2uUwVznkGIHW/e5w=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
+go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
+go.opentelemetry.io/otel v1.3.0/go.mod h1:PWIKzi6JCp7sM0k9yZ43VX+T345uNbAkDKwHVjb2PTs=
+go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.3.0/go.mod h1:VpP4/RMn8bv8gNo9uK7/IMY4mtWLELsS+JIP0inH0h4=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.3.0/go.mod h1:hO1KLR7jcKaDDKDkvI9dP/FIhpmna5lkqPUQdEjFAM8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.3.0/go.mod h1:keUU7UfnwWTWpJ+FWnyqmogPa82nuU5VUANFq49hlMY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.3.0/go.mod h1:QNX1aly8ehqqX1LEa6YniTU7VY9I6R3X/oPxhGdTceE=
+go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
+go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
+go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
+go.opentelemetry.io/otel/sdk v1.3.0/go.mod h1:rIo4suHNhQwBIPg9axF8V9CA72Wz2mKF1teNrup8yzs=
+go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
+go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
+go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
+go.opentelemetry.io/otel/trace v1.3.0/go.mod h1:c/VDhno8888bvQYmbYLqe41/Ldmr/KKunbvWM4/fEjk=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.11.0/go.mod h1:QpEjXPrNQzrFDZgoTo49dgHR9RYRSrg3NAKnUGl9YpQ=
 go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
 go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
 go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
+go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE=
 go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
+go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
 go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA=
 go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
 go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
+go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
 go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
 go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
 go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
+go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
 golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180505025534-4ec37c66abab/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1401,8 +1553,11 @@ golang.org/x/crypto v0.0.0-2020100217020
 golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
-golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e h1:gsTQYXdTw2Gq7RBsWvlQ91b+aEQ6bXFUngBGuR8sPpI=
 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
+golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
+golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1415,7 +1570,6 @@ golang.org/x/exp v0.0.0-20191129062945-2
 golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
 golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
-golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y=
 golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
 golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs=
 golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
@@ -1443,8 +1597,10 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
 golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
+golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
+golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
 golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -1504,14 +1660,21 @@ golang.org/x/net v0.0.0-20210119194325-5
 golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
 golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
 golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
+golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
 golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20211020060615-d418f374d309 h1:A0lJIi+hcTR6aajJH4YqKWwohY4aW9RO7oRMcdv+HKI=
-golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98 h1:+6WJMRLHlD7X7frgp7TUZ36RnQzSf9wVVTNakEp+nqY=
+golang.org/x/net v0.0.0-20220105145211-5b0dc2dfae98/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
 golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1527,8 +1690,8 @@ golang.org/x/oauth2 v0.0.0-2021051416434
 golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1 h1:B333XXssMuKQeBwiNODx4TupZy7bf4sxFZnN2ZOcvUE=
-golang.org/x/oauth2 v0.0.0-20211005180243-6b3c2da341f1/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
+golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -1570,6 +1733,7 @@ golang.org/x/sys v0.0.0-20190606165138-5
 golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1623,6 +1787,7 @@ golang.org/x/sys v0.0.0-20200905004654-b
 golang.org/x/sys v0.0.0-20200909081042-eff7692f9009/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200916030750-2334cc1a136f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200922070232-aee5d888a860/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1630,6 +1795,7 @@ golang.org/x/sys v0.0.0-20201119102817-f
 golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1639,7 +1805,10 @@ golang.org/x/sys v0.0.0-20210315160823-c
 golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210426230700-d19ff857e887/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1649,15 +1818,25 @@ golang.org/x/sys v0.0.0-20210616094352-5
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210903071746-97244b99971b/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211020174200-9d6173849985 h1:LOlKVhfDyahgmqa97awczplwkjzNaELFg3zRIJ13RYo=
-golang.org/x/sys v0.0.0-20211020174200-9d6173849985/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
-golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
 golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
+golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1666,8 +1845,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9s
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
+golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -1676,8 +1856,9 @@ golang.org/x/time v0.0.0-20200416051211-
 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac h1:7zkz7BUtwNFFqcowJ+RIgu2MaV/MapERkDIy+mwPyjs=
 golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11 h1:GZokNIeuVkl3aZHJchRrr13WCsols02MLUcz1U9is6M=
+golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1705,6 +1886,7 @@ golang.org/x/tools v0.0.0-20190617190820
 golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
+golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI=
 golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1712,6 +1894,8 @@ golang.org/x/tools v0.0.0-20190911174233
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
@@ -1737,17 +1921,20 @@ golang.org/x/tools v0.0.0-20200312045724
 golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
 golang.org/x/tools v0.0.0-20200422205258-72e4a01eba43/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
 golang.org/x/tools v0.0.0-20200721032237-77f530d86f9a/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
+golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
 golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
 golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
@@ -1756,8 +1943,9 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xw
 golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
 golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
-golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
-golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
+golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
+golang.org/x/tools v0.1.9-0.20211209172050-90a85b2969be h1:JRBiPXZpZ1FsceyPRRme0vX394zXC3xlhqu705k9nzM=
+golang.org/x/tools v0.1.9-0.20211209172050-90a85b2969be/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1766,10 +1954,8 @@ golang.org/x/xerrors v0.0.0-202008041841
 gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
 gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
 gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=
-gonum.org/v1/gonum v0.8.2 h1:CCXrcPKiGGotvnN6jfUsKk4rRqm7q09/YbKb5xCEvtM=
 gonum.org/v1/gonum v0.8.2/go.mod h1:oe/vMfY3deqTw+1EZJhuvEW2iwGF1bW9wwu7XCu0+v0=
 gonum.org/v1/netlib v0.0.0-20181029234149-ec6d1f5cefe6/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
-gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0 h1:OE9mWmgKkjJyEmDAAtGMPjXu+YNeGvK9VTSHY6+Qihc=
 gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6dHfk7C6KdzKA7wR7u/rKwOGE66zvw=
 gonum.org/v1/plot v0.0.0-20190515093506-e2840ee46a6b/go.mod h1:Wt8AAjI+ypCyYX3nZBvf6cAIx93T+c/OS2HFAYskSZc=
 google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
@@ -1801,8 +1987,9 @@ google.golang.org/api v0.54.0/go.mod h1:
 google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
 google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
 google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
-google.golang.org/api v0.59.0 h1:fPfFO7gttlXYo2ALuD3HxJzh8vaF++4youI0BkFL6GE=
-google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
+google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
+google.golang.org/api v0.64.0 h1:l3pi8ncrQgB9+ncFw3A716L8lWujnXniBYbxWqqy6tE=
+google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
 google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
 google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1841,11 +2028,13 @@ google.golang.org/genproto v0.0.0-202003
 google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200420144010-e5e8543f8aeb/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
+google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
 google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
 google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
+google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -1876,9 +2065,11 @@ google.golang.org/genproto v0.0.0-202108
 google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
 google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
 google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211008145708-270636b82663/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a h1:8maMHMQp9NroHXhc3HelFX9Ay2lWlXLcdH5mw5Biz0s=
-google.golang.org/genproto v0.0.0-20211020151524-b7c3a969101a/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
+google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb h1:ZrsicilzPCS/Xr8qtBZZLpy4P9TYXAfl49ctG1/5tgw=
+google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
 google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
 google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1911,8 +2102,11 @@ google.golang.org/grpc v1.37.1/go.mod h1
 google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
 google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
 google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
-google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1935,12 +2129,12 @@ gopkg.in/check.v1 v0.0.0-20161208181325-
 gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
 gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
 gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
 gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
 gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
-gopkg.in/fsnotify/fsnotify.v1 v1.4.7 h1:XNNYLJHt73EyYiCZi6+xjupS9CpvmiDgjPTAjrBlQbo=
 gopkg.in/fsnotify/fsnotify.v1 v1.4.7/go.mod h1:Fyux9zXlo4rWoMSIzpn9fDAYjalPqJ/K1qJ27s+7ltE=
 gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
 gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
@@ -1948,6 +2142,7 @@ gopkg.in/go-playground/assert.v1 v1.2.1/
 gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
 gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
 gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
 gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -1989,49 +2184,61 @@ k8s.io/api v0.17.5/go.mod h1:0zV5/ungglg
 k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
 k8s.io/api v0.20.4/go.mod h1:++lNL1AJMkDymriNniQsWRkMDzRaX2Y/POTUi8yvqYQ=
 k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8=
-k8s.io/api v0.22.2 h1:M8ZzAD0V6725Fjg53fKeTJxGsJvRbk4TEm/fexHMtfw=
-k8s.io/api v0.22.2/go.mod h1:y3ydYpLJAaDI+BbSe2xmGcqxiWHmWjkEeIbiwHvnPR8=
+k8s.io/api v0.22.5 h1:xk7C+rMjF/EGELiD560jdmwzrB788mfcHiNbMQLIVI8=
+k8s.io/api v0.22.5/go.mod h1:mEhXyLaSD1qTOf40rRiKXkc+2iCem09rWLlFwhCEiAs=
 k8s.io/apimachinery v0.17.5/go.mod h1:ioIo1G/a+uONV7Tv+ZmCbMG1/a3kVw5YcDdncd8ugQ0=
 k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
 k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MAc=
-k8s.io/apimachinery v0.22.2 h1:ejz6y/zNma8clPVfNDLnPbleBo6MpoFy/HBiBqCouVk=
-k8s.io/apimachinery v0.22.2/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.1/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
+k8s.io/apimachinery v0.22.5 h1:cIPwldOYm1Slq9VLBRPtEYpyhjIm1C6aAMAoENuvN9s=
+k8s.io/apimachinery v0.22.5/go.mod h1:xziclGKwuuJ2RM5/rSFQSYAj0zdbci3DH8kj+WvyN0U=
 k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
 k8s.io/apiserver v0.20.4/go.mod h1:Mc80thBKOyy7tbvFtB4kJv1kbdD0eIH8k8vianJcbFM=
 k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q=
+k8s.io/apiserver v0.22.5/go.mod h1:s2WbtgZAkTKt679sYtSudEQrTGWUSQAPe6MupLnlmaQ=
 k8s.io/client-go v0.17.5/go.mod h1:S8uZpBpjJJdEH/fEyxcqg7Rn0P5jH+ilkgBHjriSmNo=
 k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
 k8s.io/client-go v0.20.4/go.mod h1:LiMv25ND1gLUdBeYxBIwKpkSC5IsozMMmOOeSJboP+k=
 k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0=
-k8s.io/client-go v0.22.2 h1:DaSQgs02aCC1QcwUdkKZWOeaVsQjYvWv8ZazcZ6JcHc=
-k8s.io/client-go v0.22.2/go.mod h1:sAlhrkVDf50ZHx6z4K0S40wISNTarf1r800F+RlCF6U=
+k8s.io/client-go v0.22.5 h1:I8Zn/UqIdi2r02aZmhaJ1hqMxcpfJ3t5VqvHtctHYFo=
+k8s.io/client-go v0.22.5/go.mod h1:cs6yf/61q2T1SdQL5Rdcjg9J1ElXSwbjSrW2vFImM4Y=
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0=
 k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk=
 k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI=
 k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM=
+k8s.io/component-base v0.22.5/go.mod h1:VK3I+TjuF9eaa+Ln67dKxhGar5ynVbwnGrUiNF4MqCI=
 k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM=
 k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
 k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI=
 k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc=
+k8s.io/cri-api v0.23.1/go.mod h1:REJE3PSU0h/LOV1APBrupxrEJqnoxZC8KWzkBUHwrK4=
 k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
 k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
+k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
 k8s.io/kube-openapi v0.0.0-20200316234421-82d701f24f9d/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
+k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o=
 k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
-k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e h1:KLHHjkdQFomZy8+06csTWZ0m1343QqxZhR2LJ1OxCYM=
 k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
+k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
 k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
 k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
 k8s.io/utils v0.0.0-20200414100711-2df71ebbae66/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
 k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs=
+k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.15/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
+sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
 sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE=
+sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
 sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
diff -pruN 2.31.2+ds1-1/MAINTAINERS.md 2.33.5+ds1-2/MAINTAINERS.md
--- 2.31.2+ds1-1/MAINTAINERS.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/MAINTAINERS.md	2022-03-08 16:34:32.000000000 +0000
@@ -1,7 +1,7 @@
 Julien Pivotto (<roidelapluie@prometheus.io> / @roidelapluie) and Levi Harrison (<levi@leviharrison.dev> / @LeviHarrison) are the main/default maintainers, some parts of the codebase have other maintainers:
 
 * `cmd`
-  * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl), Jessica Grebenschikov (<jessica.greben1@gmail.com> / @jessicagreben)
+  * `promtool`: David Leadbeater (<dgl@dgl.cx> / @dgl), Jessica Grebenschikov (<jessicagreben@prometheus.io> / @jessicagreben)
 * `discovery`
   * `k8s`: Frederic Branczyk (<fbranczyk@gmail.com> / @brancz)
 * `documentation`
@@ -9,6 +9,7 @@ Julien Pivotto (<roidelapluie@prometheus
 * `storage`
   * `remote`: Chris Marchbanks (<csmarchbanks@gmail.com> / @csmarchbanks), Callum Styan (<callumstyan@gmail.com> / @cstyan), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka), Tom Wilkie (<tom.wilkie@gmail.com> / @tomwilkie)
 * `tsdb`: Ganesh Vernekar (<ganesh@grafana.com> / @codesome), Bartłomiej Płotka (<bwplotka@gmail.com> / @bwplotka)
+  * `agent`: Robert Fratto (<robert.fratto@grafana.com> / @rfratto)
 * `web`
   * `ui`: Julius Volz (<julius.volz@gmail.com> / @juliusv)
     * `module`: Augustin Husson (<husson.augustin@gmail.com> @nexucis)
diff -pruN 2.31.2+ds1-1/Makefile 2.33.5+ds1-2/Makefile
--- 2.31.2+ds1-1/Makefile	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/Makefile	2022-03-08 16:34:32.000000000 +0000
@@ -55,8 +55,8 @@ assets: ui-install ui-build
 	# Un-setting GOOS and GOARCH here because the generated Go code is always the same,
 	# but the cached object code is incompatible between architectures and OSes (which
 	# breaks cross-building for different combinations on CI in the same container).
-	cd web/ui && GO111MODULE=$(GO111MODULE) GOOS= GOARCH= $(GO) generate -x -v $(GOOPTS)
-	@$(GOFMT) -w ./web/ui
+	cd $(UI_PATH) && GO111MODULE=$(GO111MODULE) GOOS= GOARCH= $(GO) generate -x -v $(GOOPTS)
+	@$(GOFMT) -w ./$(UI_PATH)
 
 .PHONY: test
 # If we only want to only test go code we have to change the test target
diff -pruN 2.31.2+ds1-1/Makefile.common 2.33.5+ds1-2/Makefile.common
--- 2.31.2+ds1-1/Makefile.common	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/Makefile.common	2022-03-08 16:34:32.000000000 +0000
@@ -78,7 +78,7 @@ ifneq ($(shell which gotestsum),)
 endif
 endif
 
-PROMU_VERSION ?= 0.12.0
+PROMU_VERSION ?= 0.13.0
 PROMU_URL     := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 
 GOLANGCI_LINT :=
@@ -160,7 +160,7 @@ endif
 update-go-deps:
 	@echo ">> updating Go dependencies"
 	@for m in $$($(GO) list -mod=readonly -m -f '{{ if and (not .Indirect) (not .Main)}}{{.Path}}{{end}}' all); do \
-		$(GO) get $$m; \
+		$(GO) get -d $$m; \
 	done
 	GO111MODULE=$(GO111MODULE) $(GO) mod tidy
 ifneq (,$(wildcard vendor))
diff -pruN 2.31.2+ds1-1/model/exemplar/exemplar.go 2.33.5+ds1-2/model/exemplar/exemplar.go
--- 2.31.2+ds1-1/model/exemplar/exemplar.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/exemplar/exemplar.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,50 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package exemplar
+
+import "github.com/prometheus/prometheus/model/labels"
+
+// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters
+// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
+const ExemplarMaxLabelSetLength = 128
+
+// Exemplar is additional information associated with a time series.
+type Exemplar struct {
+	Labels labels.Labels `json:"labels"`
+	Value  float64       `json:"value"`
+	Ts     int64         `json:"timestamp"`
+	HasTs  bool
+}
+
+type QueryResult struct {
+	SeriesLabels labels.Labels `json:"seriesLabels"`
+	Exemplars    []Exemplar    `json:"exemplars"`
+}
+
+// Equals compares if the exemplar e is the same as e2. Note that if HasTs is false for
+// both exemplars then the timestamps will be ignored for the comparison. This can come up
+// when an exemplar is exported without it's own timestamp, in which case the scrape timestamp
+// is assigned to the Ts field. However we still want to treat the same exemplar, scraped without
+// an exported timestamp, as a duplicate of itself for each subsequent scrape.
+func (e Exemplar) Equals(e2 Exemplar) bool {
+	if !labels.Equal(e.Labels, e2.Labels) {
+		return false
+	}
+
+	if (e.HasTs || e2.HasTs) && e.Ts != e2.Ts {
+		return false
+	}
+
+	return e.Value == e2.Value
+}
diff -pruN 2.31.2+ds1-1/model/labels/labels.go 2.33.5+ds1-2/model/labels/labels.go
--- 2.31.2+ds1-1/model/labels/labels.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/labels/labels.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,474 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"bytes"
+	"encoding/json"
+	"sort"
+	"strconv"
+
+	"github.com/cespare/xxhash/v2"
+)
+
// Well-known label names used by Prometheus components.
const (
	MetricName   = "__name__"
	AlertName    = "alertname"
	BucketLabel  = "le"
	InstanceName = "instance"

	// labelSep prefixes the serialized form produced by Labels.Bytes.
	labelSep = '\xfe'
)

// seps separates names from values in Bytes and the Hash* methods.
// The byte 0xff can never occur in valid UTF-8, so it is collision-safe.
var seps = []byte{'\xff'}
+
// Label is a key/value pair of strings.
type Label struct {
	Name, Value string
}

// Labels is a sorted set of labels. Order has to be guaranteed upon
// instantiation.
type Labels []Label

// Len, Swap and Less implement sort.Interface, ordering by label name only.
func (ls Labels) Len() int           { return len(ls) }
func (ls Labels) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
+
+func (ls Labels) String() string {
+	var b bytes.Buffer
+
+	b.WriteByte('{')
+	for i, l := range ls {
+		if i > 0 {
+			b.WriteByte(',')
+			b.WriteByte(' ')
+		}
+		b.WriteString(l.Name)
+		b.WriteByte('=')
+		b.WriteString(strconv.Quote(l.Value))
+	}
+	b.WriteByte('}')
+	return b.String()
+}
+
// Bytes returns ls as a byte slice.
// It uses an invalid byte character as a separator and so should not be used for printing.
func (ls Labels) Bytes(buf []byte) []byte {
	b := bytes.NewBuffer(buf[:0])
	// Leading labelSep distinguishes this encoding; pairs are joined with seps[0].
	b.WriteByte(labelSep)
	for i, l := range ls {
		if i > 0 {
			b.WriteByte(seps[0])
		}
		b.WriteString(l.Name)
		b.WriteByte(seps[0])
		b.WriteString(l.Value)
	}
	return b.Bytes()
}
+
+// MarshalJSON implements json.Marshaler.
+func (ls Labels) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ls.Map())
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (ls *Labels) UnmarshalJSON(b []byte) error {
+	var m map[string]string
+
+	if err := json.Unmarshal(b, &m); err != nil {
+		return err
+	}
+
+	*ls = FromMap(m)
+	return nil
+}
+
+// MarshalYAML implements yaml.Marshaler.
+func (ls Labels) MarshalYAML() (interface{}, error) {
+	return ls.Map(), nil
+}
+
+// UnmarshalYAML implements yaml.Unmarshaler.
+func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var m map[string]string
+
+	if err := unmarshal(&m); err != nil {
+		return err
+	}
+
+	*ls = FromMap(m)
+	return nil
+}
+
+// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
+// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false.
+func (ls Labels) MatchLabels(on bool, names ...string) Labels {
+	matchedLabels := Labels{}
+
+	nameSet := map[string]struct{}{}
+	for _, n := range names {
+		nameSet[n] = struct{}{}
+	}
+
+	for _, v := range ls {
+		if _, ok := nameSet[v.Name]; on == ok && (on || v.Name != MetricName) {
+			matchedLabels = append(matchedLabels, v)
+		}
+	}
+
+	return matchedLabels
+}
+
// Hash returns a hash value for the label set.
// The input to the hash is "name\xffvalue\xff" for every pair, so pairs
// cannot be confused across name/value boundaries.
func (ls Labels) Hash() uint64 {
	// Use xxhash.Sum64(b) for fast path as it's faster.
	b := make([]byte, 0, 1024)
	for i, v := range ls {
		if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
			// If labels entry is 1KB+ do not allocate whole entry:
			// switch to the streaming hasher and feed it the buffered
			// prefix plus all remaining pairs.
			h := xxhash.New()
			_, _ = h.Write(b)
			for _, v := range ls[i:] {
				_, _ = h.WriteString(v.Name)
				_, _ = h.Write(seps)
				_, _ = h.WriteString(v.Value)
				_, _ = h.Write(seps)
			}
			return h.Sum64()
		}

		// Pair still fits in the pre-sized buffer.
		b = append(b, v.Name...)
		b = append(b, seps[0])
		b = append(b, v.Value...)
		b = append(b, seps[0])
	}
	return xxhash.Sum64(b)
}
+
// HashForLabels returns a hash value for the labels matching the provided names.
// 'names' have to be sorted in ascending order.
// b is caller-provided scratch space; the (possibly grown) buffer is
// returned so it can be reused across calls.
func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
	b = b[:0]
	// Merge-style intersection: ls is sorted by name and names is sorted,
	// so a single two-pointer pass finds all common names.
	i, j := 0, 0
	for i < len(ls) && j < len(names) {
		if names[j] < ls[i].Name {
			j++
		} else if ls[i].Name < names[j] {
			i++
		} else {
			// Name requested and present: add "name\xffvalue\xff" to the hash input.
			b = append(b, ls[i].Name...)
			b = append(b, seps[0])
			b = append(b, ls[i].Value...)
			b = append(b, seps[0])
			i++
			j++
		}
	}
	return xxhash.Sum64(b), b
}
+
// HashWithoutLabels returns a hash value for all labels except those matching
// the provided names.
// 'names' have to be sorted in ascending order.
// The metric name (__name__) is always excluded. b is scratch space that is
// returned for reuse, as in HashForLabels.
func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
	b = b[:0]
	j := 0
	for i := range ls {
		// Advance names past entries sorting before the current label.
		for j < len(names) && names[j] < ls[i].Name {
			j++
		}
		if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) {
			continue
		}
		b = append(b, ls[i].Name...)
		b = append(b, seps[0])
		b = append(b, ls[i].Value...)
		b = append(b, seps[0])
	}
	return xxhash.Sum64(b), b
}
+
+// WithLabels returns a new labels.Labels from ls that only contains labels matching names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) WithLabels(names ...string) Labels {
+	ret := make([]Label, 0, len(ls))
+
+	i, j := 0, 0
+	for i < len(ls) && j < len(names) {
+		if names[j] < ls[i].Name {
+			j++
+		} else if ls[i].Name < names[j] {
+			i++
+		} else {
+			ret = append(ret, ls[i])
+			i++
+			j++
+		}
+	}
+	return ret
+}
+
+// WithoutLabels returns a new labels.Labels from ls that contains labels not matching names.
+// 'names' have to be sorted in ascending order.
+func (ls Labels) WithoutLabels(names ...string) Labels {
+	ret := make([]Label, 0, len(ls))
+
+	j := 0
+	for i := range ls {
+		for j < len(names) && names[j] < ls[i].Name {
+			j++
+		}
+		if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) {
+			continue
+		}
+		ret = append(ret, ls[i])
+	}
+	return ret
+}
+
+// Copy returns a copy of the labels.
+func (ls Labels) Copy() Labels {
+	res := make(Labels, len(ls))
+	copy(res, ls)
+	return res
+}
+
+// Get returns the value for the label with the given name.
+// Returns an empty string if the label doesn't exist.
+func (ls Labels) Get(name string) string {
+	for _, l := range ls {
+		if l.Name == name {
+			return l.Value
+		}
+	}
+	return ""
+}
+
+// Has returns true if the label with the given name is present.
+func (ls Labels) Has(name string) bool {
+	for _, l := range ls {
+		if l.Name == name {
+			return true
+		}
+	}
+	return false
+}
+
+// HasDuplicateLabelNames returns whether ls has duplicate label names.
+// It assumes that the labelset is sorted.
+func (ls Labels) HasDuplicateLabelNames() (string, bool) {
+	for i, l := range ls {
+		if i == 0 {
+			continue
+		}
+		if l.Name == ls[i-1].Name {
+			return l.Name, true
+		}
+	}
+	return "", false
+}
+
// WithoutEmpty returns the labelset without empty labels.
// May return the same labelset.
func (ls Labels) WithoutEmpty() Labels {
	// Scan for the first empty value; only then pay for a filtered copy.
	for _, v := range ls {
		if v.Value != "" {
			continue
		}
		// Do not copy the slice until it's necessary.
		els := make(Labels, 0, len(ls)-1)
		for _, v := range ls {
			if v.Value != "" {
				els = append(els, v)
			}
		}
		return els
	}
	// No empty values: hand back the original slice unchanged.
	return ls
}
+
+// Equal returns whether the two label sets are equal.
+func Equal(ls, o Labels) bool {
+	if len(ls) != len(o) {
+		return false
+	}
+	for i, l := range ls {
+		if l.Name != o[i].Name || l.Value != o[i].Value {
+			return false
+		}
+	}
+	return true
+}
+
+// Map returns a string map of the labels.
+func (ls Labels) Map() map[string]string {
+	m := make(map[string]string, len(ls))
+	for _, l := range ls {
+		m[l.Name] = l.Value
+	}
+	return m
+}
+
+// New returns a sorted Labels from the given labels.
+// The caller has to guarantee that all label names are unique.
+func New(ls ...Label) Labels {
+	set := make(Labels, 0, len(ls))
+	for _, l := range ls {
+		set = append(set, l)
+	}
+	sort.Sort(set)
+
+	return set
+}
+
+// FromMap returns new sorted Labels from the given map.
+func FromMap(m map[string]string) Labels {
+	l := make([]Label, 0, len(m))
+	for k, v := range m {
+		l = append(l, Label{Name: k, Value: v})
+	}
+	return New(l...)
+}
+
+// FromStrings creates new labels from pairs of strings.
+func FromStrings(ss ...string) Labels {
+	if len(ss)%2 != 0 {
+		panic("invalid number of strings")
+	}
+	var res Labels
+	for i := 0; i < len(ss); i += 2 {
+		res = append(res, Label{Name: ss[i], Value: ss[i+1]})
+	}
+
+	sort.Sort(res)
+	return res
+}
+
+// Compare compares the two label sets.
+// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
+func Compare(a, b Labels) int {
+	l := len(a)
+	if len(b) < l {
+		l = len(b)
+	}
+
+	for i := 0; i < l; i++ {
+		if a[i].Name != b[i].Name {
+			if a[i].Name < b[i].Name {
+				return -1
+			}
+			return 1
+		}
+		if a[i].Value != b[i].Value {
+			if a[i].Value < b[i].Value {
+				return -1
+			}
+			return 1
+		}
+	}
+	// If all labels so far were in common, the set with fewer labels comes first.
+	return len(a) - len(b)
+}
+
// Builder allows modifying Labels.
type Builder struct {
	base Labels   // starting label set; never mutated by the builder
	del  []string // names to drop from base (includes empty-valued base labels, see Reset)
	add  []Label  // labels to set, overriding base; names kept unique by Set
}
+
+// NewBuilder returns a new LabelsBuilder.
+func NewBuilder(base Labels) *Builder {
+	b := &Builder{
+		del: make([]string, 0, 5),
+		add: make([]Label, 0, 5),
+	}
+	b.Reset(base)
+	return b
+}
+
+// Reset clears all current state for the builder.
+func (b *Builder) Reset(base Labels) {
+	b.base = base
+	b.del = b.del[:0]
+	b.add = b.add[:0]
+	for _, l := range b.base {
+		if l.Value == "" {
+			b.del = append(b.del, l.Name)
+		}
+	}
+}
+
+// Del deletes the label of the given name.
+func (b *Builder) Del(ns ...string) *Builder {
+	for _, n := range ns {
+		for i, a := range b.add {
+			if a.Name == n {
+				b.add = append(b.add[:i], b.add[i+1:]...)
+			}
+		}
+		b.del = append(b.del, n)
+	}
+	return b
+}
+
+// Set the name/value pair as a label.
+func (b *Builder) Set(n, v string) *Builder {
+	if v == "" {
+		// Empty labels are the same as missing labels.
+		return b.Del(n)
+	}
+	for i, a := range b.add {
+		if a.Name == n {
+			b.add[i].Value = v
+			return b
+		}
+	}
+	b.add = append(b.add, Label{Name: n, Value: v})
+
+	return b
+}
+
// Labels returns the labels from the builder. If no modifications
// were made, the original labels are returned.
func (b *Builder) Labels() Labels {
	if len(b.del) == 0 && len(b.add) == 0 {
		return b.base
	}

	// In the general case, labels are removed, modified or moved
	// rather than added.
	res := make(Labels, 0, len(b.base))
Outer:
	for _, l := range b.base {
		// Skip base labels that were deleted ...
		for _, n := range b.del {
			if l.Name == n {
				continue Outer
			}
		}
		// ... or overridden by a pending Set (its value is appended below).
		for _, la := range b.add {
			if l.Name == la.Name {
				continue Outer
			}
		}
		res = append(res, l)
	}
	res = append(res, b.add...)
	sort.Sort(res) // restore name order after appending the additions

	return res
}
diff -pruN 2.31.2+ds1-1/model/labels/labels_test.go 2.33.5+ds1-2/model/labels/labels_test.go
--- 2.31.2+ds1-1/model/labels/labels_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/labels/labels_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,737 @@
+// Copyright 2019 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestLabels_String(t *testing.T) {
+	cases := []struct {
+		lables   Labels
+		expected string
+	}{
+		{
+			lables: Labels{
+				{
+					Name:  "t1",
+					Value: "t1",
+				},
+				{
+					Name:  "t2",
+					Value: "t2",
+				},
+			},
+			expected: "{t1=\"t1\", t2=\"t2\"}",
+		},
+		{
+			lables:   Labels{},
+			expected: "{}",
+		},
+		{
+			lables:   nil,
+			expected: "{}",
+		},
+	}
+	for _, c := range cases {
+		str := c.lables.String()
+		require.Equal(t, c.expected, str)
+	}
+}
+
+func TestLabels_MatchLabels(t *testing.T) {
+	labels := Labels{
+		{
+			Name:  "__name__",
+			Value: "ALERTS",
+		},
+		{
+			Name:  "alertname",
+			Value: "HTTPRequestRateLow",
+		},
+		{
+			Name:  "alertstate",
+			Value: "pending",
+		},
+		{
+			Name:  "instance",
+			Value: "0",
+		},
+		{
+			Name:  "job",
+			Value: "app-server",
+		},
+		{
+			Name:  "severity",
+			Value: "critical",
+		},
+	}
+
+	tests := []struct {
+		providedNames []string
+		on            bool
+		expected      Labels
+	}{
+		// on = true, explicitly including metric name in matching.
+		{
+			providedNames: []string{
+				"__name__",
+				"alertname",
+				"alertstate",
+				"instance",
+			},
+			on: true,
+			expected: Labels{
+				{
+					Name:  "__name__",
+					Value: "ALERTS",
+				},
+				{
+					Name:  "alertname",
+					Value: "HTTPRequestRateLow",
+				},
+				{
+					Name:  "alertstate",
+					Value: "pending",
+				},
+				{
+					Name:  "instance",
+					Value: "0",
+				},
+			},
+		},
+		// on = false, explicitly excluding metric name from matching.
+		{
+			providedNames: []string{
+				"__name__",
+				"alertname",
+				"alertstate",
+				"instance",
+			},
+			on: false,
+			expected: Labels{
+				{
+					Name:  "job",
+					Value: "app-server",
+				},
+				{
+					Name:  "severity",
+					Value: "critical",
+				},
+			},
+		},
+		// on = true, explicitly excluding metric name from matching.
+		{
+			providedNames: []string{
+				"alertname",
+				"alertstate",
+				"instance",
+			},
+			on: true,
+			expected: Labels{
+				{
+					Name:  "alertname",
+					Value: "HTTPRequestRateLow",
+				},
+				{
+					Name:  "alertstate",
+					Value: "pending",
+				},
+				{
+					Name:  "instance",
+					Value: "0",
+				},
+			},
+		},
+		// on = false, implicitly excluding metric name from matching.
+		{
+			providedNames: []string{
+				"alertname",
+				"alertstate",
+				"instance",
+			},
+			on: false,
+			expected: Labels{
+				{
+					Name:  "job",
+					Value: "app-server",
+				},
+				{
+					Name:  "severity",
+					Value: "critical",
+				},
+			},
+		},
+	}
+
+	for i, test := range tests {
+		got := labels.MatchLabels(test.on, test.providedNames...)
+		require.Equal(t, test.expected, got, "unexpected labelset for test case %d", i)
+	}
+}
+
// TestLabels_HasDuplicateLabelNames checks duplicate detection; the second
// case appends a second sorted set so the duplicate "hostname" entries end
// up adjacent, as HasDuplicateLabelNames requires.
func TestLabels_HasDuplicateLabelNames(t *testing.T) {
	cases := []struct {
		Input     Labels
		Duplicate bool
		LabelName string
	}{
		{
			Input:     FromMap(map[string]string{"__name__": "up", "hostname": "localhost"}),
			Duplicate: false,
		}, {
			Input: append(
				FromMap(map[string]string{"__name__": "up", "hostname": "localhost"}),
				FromMap(map[string]string{"hostname": "127.0.0.1"})...,
			),
			Duplicate: true,
			LabelName: "hostname",
		},
	}

	for i, c := range cases {
		l, d := c.Input.HasDuplicateLabelNames()
		require.Equal(t, c.Duplicate, d, "test %d: incorrect duplicate bool", i)
		require.Equal(t, c.LabelName, l, "test %d: incorrect label name", i)
	}
}
+
+func TestLabels_WithoutEmpty(t *testing.T) {
+	for _, test := range []struct {
+		input    Labels
+		expected Labels
+	}{
+		{
+			input: Labels{
+				{Name: "foo"},
+				{Name: "bar"},
+			},
+			expected: Labels{},
+		},
+		{
+			input: Labels{
+				{Name: "foo"},
+				{Name: "bar"},
+				{Name: "baz"},
+			},
+			expected: Labels{},
+		},
+		{
+			input: Labels{
+				{Name: "__name__", Value: "test"},
+				{Name: "hostname", Value: "localhost"},
+				{Name: "job", Value: "check"},
+			},
+			expected: Labels{
+				{Name: "__name__", Value: "test"},
+				{Name: "hostname", Value: "localhost"},
+				{Name: "job", Value: "check"},
+			},
+		},
+		{
+			input: Labels{
+				{Name: "__name__", Value: "test"},
+				{Name: "hostname", Value: "localhost"},
+				{Name: "bar"},
+				{Name: "job", Value: "check"},
+			},
+			expected: Labels{
+				{Name: "__name__", Value: "test"},
+				{Name: "hostname", Value: "localhost"},
+				{Name: "job", Value: "check"},
+			},
+		},
+		{
+			input: Labels{
+				{Name: "__name__", Value: "test"},
+				{Name: "foo"},
+				{Name: "hostname", Value: "localhost"},
+				{Name: "bar"},
+				{Name: "job", Value: "check"},
+			},
+			expected: Labels{
+				{Name: "__name__", Value: "test"},
+				{Name: "hostname", Value: "localhost"},
+				{Name: "job", Value: "check"},
+			},
+		},
+		{
+			input: Labels{
+				{Name: "__name__", Value: "test"},
+				{Name: "foo"},
+				{Name: "baz"},
+				{Name: "hostname", Value: "localhost"},
+				{Name: "bar"},
+				{Name: "job", Value: "check"},
+			},
+			expected: Labels{
+				{Name: "__name__", Value: "test"},
+				{Name: "hostname", Value: "localhost"},
+				{Name: "job", Value: "check"},
+			},
+		},
+	} {
+		t.Run("", func(t *testing.T) {
+			require.Equal(t, test.expected, test.input.WithoutEmpty())
+		})
+	}
+}
+
+func TestLabels_Equal(t *testing.T) {
+	labels := Labels{
+		{
+			Name:  "aaa",
+			Value: "111",
+		},
+		{
+			Name:  "bbb",
+			Value: "222",
+		},
+	}
+
+	tests := []struct {
+		compared Labels
+		expected bool
+	}{
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bbb",
+					Value: "222",
+				},
+				{
+					Name:  "ccc",
+					Value: "333",
+				},
+			},
+			expected: false,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bar",
+					Value: "222",
+				},
+			},
+			expected: false,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bbb",
+					Value: "233",
+				},
+			},
+			expected: false,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bbb",
+					Value: "222",
+				},
+			},
+			expected: true,
+		},
+	}
+
+	for i, test := range tests {
+		got := Equal(labels, test.compared)
+		require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
+	}
+}
+
+func TestLabels_FromStrings(t *testing.T) {
+	labels := FromStrings("aaa", "111", "bbb", "222")
+	expected := Labels{
+		{
+			Name:  "aaa",
+			Value: "111",
+		},
+		{
+			Name:  "bbb",
+			Value: "222",
+		},
+	}
+
+	require.Equal(t, expected, labels, "unexpected labelset")
+
+	require.Panics(t, func() { FromStrings("aaa", "111", "bbb") }) //nolint:staticcheck // Ignore SA5012, error is intentional test.
+}
+
+func TestLabels_Compare(t *testing.T) {
+	labels := Labels{
+		{
+			Name:  "aaa",
+			Value: "111",
+		},
+		{
+			Name:  "bbb",
+			Value: "222",
+		},
+	}
+
+	tests := []struct {
+		compared Labels
+		expected int
+	}{
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "110",
+				},
+				{
+					Name:  "bbb",
+					Value: "222",
+				},
+			},
+			expected: 1,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bbb",
+					Value: "233",
+				},
+			},
+			expected: -1,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bar",
+					Value: "222",
+				},
+			},
+			expected: 1,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bbc",
+					Value: "222",
+				},
+			},
+			expected: -1,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+			},
+			expected: 1,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bbb",
+					Value: "222",
+				},
+				{
+					Name:  "ccc",
+					Value: "333",
+				},
+				{
+					Name:  "ddd",
+					Value: "444",
+				},
+			},
+			expected: -2,
+		},
+		{
+			compared: Labels{
+				{
+					Name:  "aaa",
+					Value: "111",
+				},
+				{
+					Name:  "bbb",
+					Value: "222",
+				},
+			},
+			expected: 0,
+		},
+	}
+
+	for i, test := range tests {
+		got := Compare(labels, test.compared)
+		require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
+	}
+}
+
+func TestLabels_Has(t *testing.T) {
+	tests := []struct {
+		input    string
+		expected bool
+	}{
+		{
+			input:    "foo",
+			expected: false,
+		},
+		{
+			input:    "aaa",
+			expected: true,
+		},
+	}
+
+	labelsSet := Labels{
+		{
+			Name:  "aaa",
+			Value: "111",
+		},
+		{
+			Name:  "bbb",
+			Value: "222",
+		},
+	}
+
+	for i, test := range tests {
+		got := labelsSet.Has(test.input)
+		require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
+	}
+}
+
+func TestLabels_Get(t *testing.T) {
+	require.Equal(t, "", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("foo"))
+	require.Equal(t, "111", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("aaa"))
+}
+
// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
// The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels.
// In the following list, `old` is the linear search while `new` is the binary search implementation (without calling sort.Search, which performs even worse here)
// name                                        old time/op    new time/op    delta
// Labels_Get/with_5_labels/get_first_label      5.12ns ± 0%   14.24ns ± 0%   ~     (p=1.000 n=1+1)
// Labels_Get/with_5_labels/get_middle_label     13.5ns ± 0%    18.5ns ± 0%   ~     (p=1.000 n=1+1)
// Labels_Get/with_5_labels/get_last_label       21.9ns ± 0%    18.9ns ± 0%   ~     (p=1.000 n=1+1)
// Labels_Get/with_10_labels/get_first_label     5.11ns ± 0%   19.47ns ± 0%   ~     (p=1.000 n=1+1)
// Labels_Get/with_10_labels/get_middle_label    26.2ns ± 0%    19.3ns ± 0%   ~     (p=1.000 n=1+1)
// Labels_Get/with_10_labels/get_last_label      42.8ns ± 0%    23.4ns ± 0%   ~     (p=1.000 n=1+1)
// Labels_Get/with_30_labels/get_first_label     5.10ns ± 0%   24.63ns ± 0%   ~     (p=1.000 n=1+1)
// Labels_Get/with_30_labels/get_middle_label    75.8ns ± 0%    29.7ns ± 0%   ~     (p=1.000 n=1+1)
// Labels_Get/with_30_labels/get_last_label       169ns ± 0%      29ns ± 0%   ~     (p=1.000 n=1+1)
func BenchmarkLabels_Get(b *testing.B) {
	maxLabels := 30
	allLabels := make(Labels, maxLabels)
	for i := 0; i < maxLabels; i++ {
		// Distinct 5-char names: "aaaaa", "bbbbb", ... in sorted order.
		allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)}
	}
	for _, size := range []int{5, 10, maxLabels} {
		b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) {
			labels := allLabels[:size]
			for _, scenario := range []struct {
				desc, label string
			}{
				{"get first label", labels[0].Name},
				{"get middle label", labels[size/2].Name},
				{"get last label", labels[size-1].Name},
			} {
				b.Run(scenario.desc, func(b *testing.B) {
					b.ResetTimer()
					for i := 0; i < b.N; i++ {
						_ = labels.Get(scenario.label)
					}
				})
			}
		})
	}
}
+
+func TestLabels_Copy(t *testing.T) {
+	require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Copy())
+}
+
+func TestLabels_Map(t *testing.T) {
+	require.Equal(t, map[string]string{"aaa": "111", "bbb": "222"}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Map())
+}
+
+func TestLabels_WithLabels(t *testing.T) {
+	require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithLabels("aaa", "bbb"))
+}
+
+func TestLabels_WithoutLabels(t *testing.T) {
+	require.Equal(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithoutLabels("bbb", "ccc"))
+	require.Equal(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {MetricName, "333"}}.WithoutLabels("bbb"))
+}
+
+func TestBulider_NewBulider(t *testing.T) {
+	require.Equal(
+		t,
+		&Builder{
+			base: Labels{{"aaa", "111"}},
+			del:  []string{},
+			add:  []Label{},
+		},
+		NewBuilder(Labels{{"aaa", "111"}}),
+	)
+}
+
// TestBuilder_Del checks that Del removes a pending addition of the same
// name from add and records the name in del.
func TestBuilder_Del(t *testing.T) {
	require.Equal(
		t,
		&Builder{
			del: []string{"bbb"},
			add: []Label{{"aaa", "111"}, {"ccc", "333"}},
		},
		(&Builder{
			del: []string{},
			add: []Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}},
		}).Del("bbb"),
	)
}
+
+func TestBuilder_Set(t *testing.T) {
+	require.Equal(
+		t,
+		&Builder{
+			base: Labels{{"aaa", "111"}},
+			del:  []string{},
+			add:  []Label{{"bbb", "222"}},
+		},
+		(&Builder{
+			base: Labels{{"aaa", "111"}},
+			del:  []string{},
+			add:  []Label{},
+		}).Set("bbb", "222"),
+	)
+
+	require.Equal(
+		t,
+		&Builder{
+			base: Labels{{"aaa", "111"}},
+			del:  []string{},
+			add:  []Label{{"bbb", "333"}},
+		},
+		(&Builder{
+			base: Labels{{"aaa", "111"}},
+			del:  []string{},
+			add:  []Label{{"bbb", "222"}},
+		}).Set("bbb", "333"),
+	)
+}
+
// TestBuilder_Labels checks that deletions and additions are merged into
// the base set and the result comes back sorted by name.
func TestBuilder_Labels(t *testing.T) {
	require.Equal(
		t,
		Labels{{"aaa", "111"}, {"ccc", "333"}, {"ddd", "444"}},
		(&Builder{
			base: Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}},
			del:  []string{"bbb"},
			add:  []Label{{"ddd", "444"}},
		}).Labels(),
	)
}
+
// TestLabels_Hash checks that the hash is deterministic and sensitive to
// both label order and label content.
func TestLabels_Hash(t *testing.T) {
	lbls := Labels{
		{Name: "foo", Value: "bar"},
		{Name: "baz", Value: "qux"},
	}
	require.Equal(t, lbls.Hash(), lbls.Hash())
	require.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
	require.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
}
+
// benchmarkLabelsResult sinks the hash so the compiler cannot eliminate the
// benchmarked call.
var benchmarkLabelsResult uint64

// BenchmarkLabels_Hash exercises both Hash code paths: the stack-buffer fast
// path (under 1KB) and the streaming fallback (1KB+).
func BenchmarkLabels_Hash(b *testing.B) {
	for _, tcase := range []struct {
		name string
		lbls Labels
	}{
		{
			name: "typical labels under 1KB",
			lbls: func() Labels {
				lbls := make(Labels, 10)
				for i := 0; i < len(lbls); i++ {
					// Label ~20B name, 50B value.
					lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
				}
				return lbls
			}(),
		},
		{
			name: "bigger labels over 1KB",
			lbls: func() Labels {
				lbls := make(Labels, 10)
				for i := 0; i < len(lbls); i++ {
					// Label ~50B name, 50B value.
					lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
				}
				return lbls
			}(),
		},
		{
			name: "extremely large label value 10MB",
			lbls: func() Labels {
				lbl := &strings.Builder{}
				lbl.Grow(1024 * 1024 * 10) // 10MB.
				word := "abcdefghij"
				for i := 0; i < lbl.Cap()/len(word); i++ {
					_, _ = lbl.WriteString(word)
				}
				return Labels{{Name: "__name__", Value: lbl.String()}}
			}(),
		},
	} {
		b.Run(tcase.name, func(b *testing.B) {
			var h uint64

			b.ReportAllocs()
			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				h = tcase.lbls.Hash()
			}
			benchmarkLabelsResult = h
		})
	}
}
diff -pruN 2.31.2+ds1-1/model/labels/matcher.go 2.33.5+ds1-2/model/labels/matcher.go
--- 2.31.2+ds1-1/model/labels/matcher.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/labels/matcher.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,120 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"fmt"
+)
+
// MatchType is an enum for label matching types.
type MatchType int

// Possible MatchTypes.
const (
	MatchEqual MatchType = iota
	MatchNotEqual
	MatchRegexp
	MatchNotRegexp
)

// matchTypeToStr maps each MatchType to its PromQL operator.
var matchTypeToStr = [...]string{
	MatchEqual:     "=",
	MatchNotEqual:  "!=",
	MatchRegexp:    "=~",
	MatchNotRegexp: "!~",
}

// String returns the operator for the match type; it panics for values
// outside the defined enum range.
func (m MatchType) String() string {
	if m < MatchEqual || m > MatchNotRegexp {
		panic("unknown match type")
	}
	return matchTypeToStr[m]
}
+
// Matcher models the matching of a label.
type Matcher struct {
	Type  MatchType
	Name  string
	Value string

	// re is the compiled pattern; set by NewMatcher only for
	// MatchRegexp/MatchNotRegexp, nil otherwise.
	re *FastRegexMatcher
}
+
+// NewMatcher returns a matcher object.
+func NewMatcher(t MatchType, n, v string) (*Matcher, error) {
+	m := &Matcher{
+		Type:  t,
+		Name:  n,
+		Value: v,
+	}
+	if t == MatchRegexp || t == MatchNotRegexp {
+		re, err := NewFastRegexMatcher(v)
+		if err != nil {
+			return nil, err
+		}
+		m.re = re
+	}
+	return m, nil
+}
+
+// MustNewMatcher panics on error - only for use in tests!
+func MustNewMatcher(mt MatchType, name, val string) *Matcher {
+	m, err := NewMatcher(mt, name, val)
+	if err != nil {
+		panic(err)
+	}
+	return m
+}
+
// String returns a PromQL-like representation, e.g. foo=~"bar.*".
func (m *Matcher) String() string {
	return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value)
}
+
// Matches returns whether the matcher matches the given string value.
func (m *Matcher) Matches(s string) bool {
	switch m.Type {
	case MatchEqual:
		return s == m.Value
	case MatchNotEqual:
		return s != m.Value
	case MatchRegexp:
		return m.re.MatchString(s)
	case MatchNotRegexp:
		return !m.re.MatchString(s)
	}
	// Unreachable for matchers constructed through NewMatcher.
	panic("labels.Matcher.Matches: invalid match type")
}
+
+// Inverse returns a matcher that matches the opposite.
+func (m *Matcher) Inverse() (*Matcher, error) {
+	switch m.Type {
+	case MatchEqual:
+		return NewMatcher(MatchNotEqual, m.Name, m.Value)
+	case MatchNotEqual:
+		return NewMatcher(MatchEqual, m.Name, m.Value)
+	case MatchRegexp:
+		return NewMatcher(MatchNotRegexp, m.Name, m.Value)
+	case MatchNotRegexp:
+		return NewMatcher(MatchRegexp, m.Name, m.Value)
+	}
+	panic("labels.Matcher.Matches: invalid match type")
+}
+
+// GetRegexString returns the regex string.
+func (m *Matcher) GetRegexString() string {
+	if m.re == nil {
+		return ""
+	}
+	return m.re.GetRegexString()
+}
diff -pruN 2.31.2+ds1-1/model/labels/matcher_test.go 2.33.5+ds1-2/model/labels/matcher_test.go
--- 2.31.2+ds1-1/model/labels/matcher_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/labels/matcher_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,125 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// mustNewMatcher builds a matcher with an empty label name for table tests,
+// failing the test immediately if the value does not compile.
+func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher {
+	m, err := NewMatcher(mType, "", value)
+	require.NoError(t, err)
+	return m
+}
+
+// TestMatcher checks Matches for every MatchType against matching and
+// non-matching values. Note regex matchers are fully anchored, so "bar"
+// does not match "foo-bar" while ".*bar" does.
+func TestMatcher(t *testing.T) {
+	tests := []struct {
+		matcher *Matcher
+		value   string
+		match   bool
+	}{
+		{
+			matcher: mustNewMatcher(t, MatchEqual, "bar"),
+			value:   "bar",
+			match:   true,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchEqual, "bar"),
+			value:   "foo-bar",
+			match:   false,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchNotEqual, "bar"),
+			value:   "bar",
+			match:   false,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchNotEqual, "bar"),
+			value:   "foo-bar",
+			match:   true,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchRegexp, "bar"),
+			value:   "bar",
+			match:   true,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchRegexp, "bar"),
+			value:   "foo-bar",
+			match:   false,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchRegexp, ".*bar"),
+			value:   "foo-bar",
+			match:   true,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchNotRegexp, "bar"),
+			value:   "bar",
+			match:   false,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchNotRegexp, "bar"),
+			value:   "foo-bar",
+			match:   true,
+		},
+		{
+			matcher: mustNewMatcher(t, MatchNotRegexp, ".*bar"),
+			value:   "foo-bar",
+			match:   false,
+		},
+	}
+
+	for _, test := range tests {
+		// Fixed argument order: testify expects (t, expected, actual) so
+		// failure messages label the values correctly.
+		require.Equal(t, test.match, test.matcher.Matches(test.value))
+	}
+}
+
+// TestInverse verifies that Inverse flips each match type and preserves
+// the matcher's name and value.
+func TestInverse(t *testing.T) {
+	tests := []struct {
+		matcher  *Matcher
+		expected *Matcher
+	}{
+		{
+			matcher:  &Matcher{Type: MatchEqual, Name: "name1", Value: "value1"},
+			expected: &Matcher{Type: MatchNotEqual, Name: "name1", Value: "value1"},
+		},
+		{
+			matcher:  &Matcher{Type: MatchNotEqual, Name: "name2", Value: "value2"},
+			expected: &Matcher{Type: MatchEqual, Name: "name2", Value: "value2"},
+		},
+		{
+			matcher:  &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3"},
+			expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3"},
+		},
+		{
+			matcher:  &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4"},
+			expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4"},
+		},
+	}
+
+	for _, test := range tests {
+		result, err := test.matcher.Inverse()
+		require.NoError(t, err)
+		require.Equal(t, test.expected.Type, result.Type)
+		// Previously only Type was asserted; Name and Value must survive
+		// the inversion as well.
+		require.Equal(t, test.expected.Name, result.Name)
+		require.Equal(t, test.expected.Value, result.Value)
+	}
+}
+
+// BenchmarkMatchType_String cycles through every MatchType value while
+// benchmarking String().
+func BenchmarkMatchType_String(b *testing.B) {
+	// Fixed off-by-one: the loop previously used i <= b.N, running b.N+1
+	// iterations; the testing package contract is exactly b.N.
+	for i := 0; i < b.N; i++ {
+		_ = MatchType(i % int(MatchNotRegexp+1)).String()
+	}
+}
diff -pruN 2.31.2+ds1-1/model/labels/regexp.go 2.33.5+ds1-2/model/labels/regexp.go
--- 2.31.2+ds1-1/model/labels/regexp.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/labels/regexp.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,107 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"regexp"
+	"regexp/syntax"
+	"strings"
+)
+
+// FastRegexMatcher wraps an anchored regexp together with optional literal
+// prefix/suffix/contains strings extracted from it; the literals are used
+// by MatchString as cheap rejections before running the full regexp.
+// An empty string means "no such literal check".
+type FastRegexMatcher struct {
+	re       *regexp.Regexp
+	prefix   string
+	suffix   string
+	contains string
+}
+
+// NewFastRegexMatcher compiles v fully anchored ("^(?:v)$") and, when the
+// parsed expression is a concatenation, extracts literal fast-path hints
+// via optimizeConcatRegex.
+func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
+	re, err := regexp.Compile("^(?:" + v + ")$")
+	if err != nil {
+		return nil, err
+	}
+
+	// Parse the un-anchored source separately to inspect its syntax tree.
+	parsed, err := syntax.Parse(v, syntax.Perl)
+	if err != nil {
+		return nil, err
+	}
+
+	m := &FastRegexMatcher{
+		re: re,
+	}
+
+	if parsed.Op == syntax.OpConcat {
+		m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
+	}
+
+	return m, nil
+}
+
+// MatchString reports whether s matches the (anchored) regexp, first
+// rejecting via the cheap literal prefix/suffix/contains checks when they
+// were extracted at construction time.
+func (m *FastRegexMatcher) MatchString(s string) bool {
+	if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
+		return false
+	}
+	if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
+		return false
+	}
+	if m.contains != "" && !strings.Contains(s, m.contains) {
+		return false
+	}
+	return m.re.MatchString(s)
+}
+
+// GetRegexString returns the compiled pattern source, i.e. the anchored
+// form "^(?:v)$", not the original v.
+func (m *FastRegexMatcher) GetRegexString() string {
+	return m.re.String()
+}
+
+// optimizeConcatRegex returns literal prefix/suffix text that can be safely
+// checked against the label value before running the regexp matcher.
+// Literals carrying the FoldCase flag are skipped everywhere below, because
+// a case-insensitive literal cannot be checked with the case-sensitive
+// strings.HasPrefix/HasSuffix/Contains fast paths.
+func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
+	sub := r.Sub
+
+	// We can safely remove begin and end text matchers respectively
+	// at the beginning and end of the regexp.
+	if len(sub) > 0 && sub[0].Op == syntax.OpBeginText {
+		sub = sub[1:]
+	}
+	if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText {
+		sub = sub[:len(sub)-1]
+	}
+
+	if len(sub) == 0 {
+		return
+	}
+
+	// Given Prometheus regex matchers are always anchored to the begin/end
+	// of the text, if the first/last operations are literals, we can safely
+	// treat them as prefix/suffix.
+	if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 {
+		prefix = string(sub[0].Rune)
+	}
+	if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 {
+		suffix = string(sub[last].Rune)
+	}
+
+	// If contains any literal which is not a prefix/suffix, we keep the
+	// 1st one. We do not keep the whole list of literals to simplify the
+	// fast path.
+	for i := 1; i < len(sub)-1; i++ {
+		if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
+			contains = string(sub[i].Rune)
+			break
+		}
+	}
+
+	return
+}
diff -pruN 2.31.2+ds1-1/model/labels/regexp_test.go 2.33.5+ds1-2/model/labels/regexp_test.go
--- 2.31.2+ds1-1/model/labels/regexp_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/labels/regexp_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,98 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"regexp/syntax"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+// TestNewFastRegexMatcher checks end-to-end matching, including full
+// anchoring (".*" does not match "\n" because Go's "." excludes newline)
+// and empty-pattern behavior.
+func TestNewFastRegexMatcher(t *testing.T) {
+	cases := []struct {
+		regex    string
+		value    string
+		expected bool
+	}{
+		{regex: "(foo|bar)", value: "foo", expected: true},
+		{regex: "(foo|bar)", value: "foo bar", expected: false},
+		{regex: "(foo|bar)", value: "bar", expected: true},
+		{regex: "foo.*", value: "foo bar", expected: true},
+		{regex: "foo.*", value: "bar foo", expected: false},
+		{regex: ".*foo", value: "foo bar", expected: false},
+		{regex: ".*foo", value: "bar foo", expected: true},
+		{regex: ".*foo", value: "foo", expected: true},
+		{regex: "^.*foo$", value: "foo", expected: true},
+		{regex: "^.+foo$", value: "foo", expected: false},
+		{regex: "^.+foo$", value: "bfoo", expected: true},
+		{regex: ".*", value: "\n", expected: false},
+		{regex: ".*", value: "\nfoo", expected: false},
+		{regex: ".*foo", value: "\nfoo", expected: false},
+		{regex: "foo.*", value: "foo\n", expected: false},
+		{regex: "foo\n.*", value: "foo\n", expected: true},
+		{regex: ".*foo.*", value: "foo", expected: true},
+		{regex: ".*foo.*", value: "foo bar", expected: true},
+		{regex: ".*foo.*", value: "hello foo world", expected: true},
+		{regex: ".*foo.*", value: "hello foo\n world", expected: false},
+		{regex: ".*foo\n.*", value: "hello foo\n world", expected: true},
+		{regex: ".*", value: "foo", expected: true},
+		{regex: "", value: "foo", expected: false},
+		{regex: "", value: "", expected: true},
+	}
+
+	for _, c := range cases {
+		m, err := NewFastRegexMatcher(c.regex)
+		require.NoError(t, err)
+		require.Equal(t, c.expected, m.MatchString(c.value))
+	}
+}
+
+// TestOptimizeConcatRegex checks the literal hints extracted from concat
+// expressions; case-insensitive literals must produce no hint.
+func TestOptimizeConcatRegex(t *testing.T) {
+	cases := []struct {
+		regex    string
+		prefix   string
+		suffix   string
+		contains string
+	}{
+		{regex: "foo(hello|bar)", prefix: "foo", suffix: "", contains: ""},
+		{regex: "foo(hello|bar)world", prefix: "foo", suffix: "world", contains: ""},
+		{regex: "foo.*", prefix: "foo", suffix: "", contains: ""},
+		{regex: "foo.*hello.*bar", prefix: "foo", suffix: "bar", contains: "hello"},
+		{regex: ".*foo", prefix: "", suffix: "foo", contains: ""},
+		{regex: "^.*foo$", prefix: "", suffix: "foo", contains: ""},
+		{regex: ".*foo.*", prefix: "", suffix: "", contains: "foo"},
+		{regex: ".*foo.*bar.*", prefix: "", suffix: "", contains: "foo"},
+		{regex: ".*(foo|bar).*", prefix: "", suffix: "", contains: ""},
+		{regex: ".*[abc].*", prefix: "", suffix: "", contains: ""},
+		{regex: ".*((?i)abc).*", prefix: "", suffix: "", contains: ""},
+		{regex: ".*(?i:abc).*", prefix: "", suffix: "", contains: ""},
+		{regex: "(?i:abc).*", prefix: "", suffix: "", contains: ""},
+		{regex: ".*(?i:abc)", prefix: "", suffix: "", contains: ""},
+		{regex: ".*(?i:abc)def.*", prefix: "", suffix: "", contains: "def"},
+		{regex: "(?i).*(?-i:abc)def", prefix: "", suffix: "", contains: "abc"},
+		{regex: ".*(?msU:abc).*", prefix: "", suffix: "", contains: "abc"},
+		{regex: "[aA]bc.*", prefix: "", suffix: "", contains: "bc"},
+	}
+
+	for _, c := range cases {
+		parsed, err := syntax.Parse(c.regex, syntax.Perl)
+		require.NoError(t, err)
+
+		prefix, suffix, contains := optimizeConcatRegex(parsed)
+		require.Equal(t, c.prefix, prefix)
+		require.Equal(t, c.suffix, suffix)
+		require.Equal(t, c.contains, contains)
+	}
+}
diff -pruN 2.31.2+ds1-1/model/labels/test_utils.go 2.33.5+ds1-2/model/labels/test_utils.go
--- 2.31.2+ds1-1/model/labels/test_utils.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/labels/test_utils.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,87 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package labels
+
+import (
+	"bufio"
+	"os"
+	"sort"
+	"strings"
+
+	"github.com/pkg/errors"
+)
+
+// Slice is a sortable slice of label sets.
+// Len/Swap/Less implement sort.Interface, ordering by labels.Compare.
+type Slice []Labels
+
+func (s Slice) Len() int           { return len(s) }
+func (s Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
+
+// Selector holds constraints for matching against a label set.
+type Selector []*Matcher
+
+// Matches returns whether the labels satisfy all matchers.
+// Each matcher is evaluated against labels.Get(m.Name); an absent label
+// presumably yields "" from Get, so matchers see the empty value — TODO
+// confirm against the Labels.Get implementation.
+func (s Selector) Matches(labels Labels) bool {
+	for _, m := range s {
+		if v := labels.Get(m.Name); !m.Matches(v) {
+			return false
+		}
+	}
+	return true
+}
+
+// ReadLabels reads up to n label sets in a JSON formatted file fn. It is mostly useful
+// to load testing data. Duplicate label sets (by hash) are skipped, and an
+// error is returned when fewer than n unique sets are found.
+func ReadLabels(fn string, n int) ([]Labels, error) {
+	f, err := os.Open(fn)
+	if err != nil {
+		return nil, err
+	}
+	defer f.Close()
+
+	scanner := bufio.NewScanner(f)
+
+	var mets []Labels
+	hashes := map[uint64]struct{}{}
+	i := 0
+
+	for scanner.Scan() && i < n {
+		m := make(Labels, 0, 10)
+
+		// Strip the JSON-ish decoration, leaving comma-separated name:value pairs.
+		r := strings.NewReplacer("\"", "", "{", "", "}", "")
+		s := r.Replace(scanner.Text())
+
+		labelChunks := strings.Split(s, ",")
+		for _, labelChunk := range labelChunks {
+			// SplitN keeps any ':' inside the value intact and lets us
+			// detect malformed chunks instead of panicking on split[1].
+			split := strings.SplitN(labelChunk, ":", 2)
+			if len(split) != 2 {
+				return nil, errors.Errorf("invalid label chunk %q: missing ':' separator", labelChunk)
+			}
+			m = append(m, Label{Name: split[0], Value: split[1]})
+		}
+		// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
+		sort.Sort(m)
+
+		h := m.Hash()
+		if _, ok := hashes[h]; ok {
+			continue
+		}
+		mets = append(mets, m)
+		hashes[h] = struct{}{}
+		i++
+	}
+
+	// Surface read errors that silently terminated the scan loop.
+	if err := scanner.Err(); err != nil {
+		return nil, err
+	}
+
+	if i != n {
+		return mets, errors.Errorf("requested %d metrics but found %d", n, i)
+	}
+	return mets, nil
+}
diff -pruN 2.31.2+ds1-1/model/relabel/relabel.go 2.33.5+ds1-2/model/relabel/relabel.go
--- 2.31.2+ds1-1/model/relabel/relabel.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/relabel/relabel.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,270 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package relabel
+
+import (
+	"crypto/md5"
+	"fmt"
+	"regexp"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/model"
+
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+var (
+	// relabelTarget validates 'target_label' (and labelmap replacements):
+	// label-name characters plus $-based capture references like $1 / ${1}.
+	relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`)
+
+	// DefaultRelabelConfig is the set of defaults applied before YAML
+	// unmarshaling fills in user-provided fields.
+	DefaultRelabelConfig = Config{
+		Action:      Replace,
+		Separator:   ";",
+		Regex:       MustNewRegexp("(.*)"),
+		Replacement: "$1",
+	}
+)
+
+// Action is the action to be performed on relabeling.
+// Values are the lowercase strings accepted in YAML configuration
+// (see UnmarshalYAML, which lowercases input before comparing).
+type Action string
+
+const (
+	// Replace performs a regex replacement.
+	Replace Action = "replace"
+	// Keep drops targets for which the input does not match the regex.
+	Keep Action = "keep"
+	// Drop drops targets for which the input does match the regex.
+	Drop Action = "drop"
+	// HashMod sets a label to the modulus of a hash of labels.
+	HashMod Action = "hashmod"
+	// LabelMap copies labels to other labelnames based on a regex.
+	LabelMap Action = "labelmap"
+	// LabelDrop drops any label matching the regex.
+	LabelDrop Action = "labeldrop"
+	// LabelKeep drops any label not matching the regex.
+	LabelKeep Action = "labelkeep"
+)
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+// Action names are matched case-insensitively (input is lowercased first);
+// anything outside the known set is an error.
+func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	switch act := Action(strings.ToLower(s)); act {
+	case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep:
+		*a = act
+		return nil
+	}
+	return errors.Errorf("unknown relabel action %q", s)
+}
+
+// Config is the configuration for relabeling of target label sets.
+// Field names and tags mirror the relabel_config YAML block.
+type Config struct {
+	// A list of labels from which values are taken and concatenated
+	// with the configured separator in order.
+	SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"`
+	// Separator is the string between concatenated values from the source labels.
+	Separator string `yaml:"separator,omitempty"`
+	// Regex against which the concatenation is matched.
+	Regex Regexp `yaml:"regex,omitempty"`
+	// Modulus to take of the hash of concatenated values from the source labels.
+	Modulus uint64 `yaml:"modulus,omitempty"`
+	// TargetLabel is the label to which the resulting string is written in a replacement.
+	// Regexp interpolation is allowed for the replace action.
+	TargetLabel string `yaml:"target_label,omitempty"`
+	// Replacement is the regex replacement pattern to be used.
+	Replacement string `yaml:"replacement,omitempty"`
+	// Action is the action to be performed for the relabeling.
+	Action Action `yaml:"action,omitempty"`
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+// It seeds the struct with DefaultRelabelConfig, decodes via the `plain`
+// alias (to avoid recursing into this method), then validates the
+// per-action field requirements.
+func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	*c = DefaultRelabelConfig
+	type plain Config
+	if err := unmarshal((*plain)(c)); err != nil {
+		return err
+	}
+	// An absent regex field leaves Regexp zero-valued; normalize to the
+	// compiled empty pattern so MatchString is safe to call.
+	if c.Regex.Regexp == nil {
+		c.Regex = MustNewRegexp("")
+	}
+	if c.Action == "" {
+		return errors.Errorf("relabel action cannot be empty")
+	}
+	if c.Modulus == 0 && c.Action == HashMod {
+		return errors.Errorf("relabel configuration for hashmod requires non-zero modulus")
+	}
+	if (c.Action == Replace || c.Action == HashMod) && c.TargetLabel == "" {
+		return errors.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
+	}
+	if c.Action == Replace && !relabelTarget.MatchString(c.TargetLabel) {
+		return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
+	}
+	if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) {
+		return errors.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
+	}
+	if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() {
+		return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
+	}
+
+	// labeldrop/labelkeep operate purely on label names, so any other
+	// configured field (beyond the defaults) indicates a user mistake.
+	if c.Action == LabelDrop || c.Action == LabelKeep {
+		if c.SourceLabels != nil ||
+			c.TargetLabel != DefaultRelabelConfig.TargetLabel ||
+			c.Modulus != DefaultRelabelConfig.Modulus ||
+			c.Separator != DefaultRelabelConfig.Separator ||
+			c.Replacement != DefaultRelabelConfig.Replacement {
+			return errors.Errorf("%s action requires only 'regex', and no other fields", c.Action)
+		}
+	}
+
+	return nil
+}
+
+// Regexp encapsulates a regexp.Regexp and makes it YAML marshalable.
+// original preserves the un-anchored user-supplied pattern so marshaling
+// round-trips the config text rather than the anchored compiled form.
+type Regexp struct {
+	*regexp.Regexp
+	original string
+}
+
+// NewRegexp creates a new anchored Regexp and returns an error if the
+// passed-in regular expression does not compile.
+// NOTE(review): on error a partially-populated Regexp is still returned;
+// callers must check err before using the value.
+func NewRegexp(s string) (Regexp, error) {
+	regex, err := regexp.Compile("^(?:" + s + ")$")
+	return Regexp{
+		Regexp:   regex,
+		original: s,
+	}, err
+}
+
+// MustNewRegexp works like NewRegexp, but panics if the regular expression does not compile.
+// Intended for package-level defaults and tests, per the Must* convention.
+func MustNewRegexp(s string) Regexp {
+	re, err := NewRegexp(s)
+	if err != nil {
+		panic(err)
+	}
+	return re
+}
+
+// UnmarshalYAML implements the yaml.Unmarshaler interface.
+// The YAML scalar is compiled (anchored) via NewRegexp; compile errors
+// surface as unmarshal errors.
+func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	if err := unmarshal(&s); err != nil {
+		return err
+	}
+	r, err := NewRegexp(s)
+	if err != nil {
+		return err
+	}
+	*re = r
+	return nil
+}
+
+// MarshalYAML implements the yaml.Marshaler interface.
+// It emits the original (un-anchored) pattern text; an empty original
+// marshals as nil so the field is omitted.
+func (re Regexp) MarshalYAML() (interface{}, error) {
+	if re.original != "" {
+		return re.original, nil
+	}
+	return nil, nil
+}
+
+// Process returns a relabeled copy of the given label set. The relabel configurations
+// are applied in order of input.
+// If a label set is dropped, nil is returned.
+// May return the input labelSet modified.
+func Process(labels labels.Labels, cfgs ...*Config) labels.Labels {
+	for _, cfg := range cfgs {
+		labels = relabel(labels, cfg)
+		// A nil result means a drop/keep rule rejected the set; stop early.
+		if labels == nil {
+			return nil
+		}
+	}
+	return labels
+}
+
+// relabel applies a single Config to lset and returns the resulting label
+// set, or nil when the set is dropped (Drop match / Keep non-match).
+// Source label values are joined with cfg.Separator before matching.
+func relabel(lset labels.Labels, cfg *Config) labels.Labels {
+	values := make([]string, 0, len(cfg.SourceLabels))
+	for _, ln := range cfg.SourceLabels {
+		values = append(values, lset.Get(string(ln)))
+	}
+	val := strings.Join(values, cfg.Separator)
+
+	lb := labels.NewBuilder(lset)
+
+	switch cfg.Action {
+	case Drop:
+		if cfg.Regex.MatchString(val) {
+			return nil
+		}
+	case Keep:
+		if !cfg.Regex.MatchString(val) {
+			return nil
+		}
+	case Replace:
+		indexes := cfg.Regex.FindStringSubmatchIndex(val)
+		// If there is no match no replacement must take place.
+		if indexes == nil {
+			break
+		}
+		// The target label itself may contain capture references ($1, ${1}).
+		target := model.LabelName(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes))
+		if !target.IsValid() {
+			// Expanding to an invalid name deletes the target label instead.
+			lb.Del(cfg.TargetLabel)
+			break
+		}
+		res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes)
+		if len(res) == 0 {
+			// An empty replacement likewise removes the target label.
+			lb.Del(cfg.TargetLabel)
+			break
+		}
+		lb.Set(string(target), string(res))
+	case HashMod:
+		mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus
+		lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod))
+	case LabelMap:
+		// Copy matching labels to names produced by the replacement pattern.
+		for _, l := range lset {
+			if cfg.Regex.MatchString(l.Name) {
+				res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement)
+				lb.Set(res, l.Value)
+			}
+		}
+	case LabelDrop:
+		for _, l := range lset {
+			if cfg.Regex.MatchString(l.Name) {
+				lb.Del(l.Name)
+			}
+		}
+	case LabelKeep:
+		for _, l := range lset {
+			if !cfg.Regex.MatchString(l.Name) {
+				lb.Del(l.Name)
+			}
+		}
+	default:
+		// Unreachable for configs validated by UnmarshalYAML.
+		panic(errors.Errorf("relabel: unknown relabel action type %q", cfg.Action))
+	}
+
+	return lb.Labels()
+}
+
+// sum64 sums the md5 hash to an uint64.
+// NOTE(review): for i < 8 the shift is >= 64, and Go defines such shifts
+// as yielding 0, so the first 8 bytes contribute nothing — the result is
+// effectively the big-endian uint64 of hash[8:16]. Kept as-is because
+// hashmod outputs must remain stable across versions.
+func sum64(hash [md5.Size]byte) uint64 {
+	var s uint64
+
+	for i, b := range hash {
+		shift := uint64((md5.Size - i - 1) * 8)
+
+		s |= uint64(b) << shift
+	}
+	return s
+}
diff -pruN 2.31.2+ds1-1/model/relabel/relabel_test.go 2.33.5+ds1-2/model/relabel/relabel_test.go
--- 2.31.2+ds1-1/model/relabel/relabel_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/relabel/relabel_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,480 @@
+// Copyright 2015 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package relabel
+
+import (
+	"testing"
+
+	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+// TestRelabel drives Process end-to-end, one table entry per scenario:
+// replace (incl. chained configs, capture refs in target_label, invalid
+// targets/replacements), drop, keep, hashmod, labelmap, labeldrop and
+// labelkeep. A nil expected output means the label set was dropped.
+func TestRelabel(t *testing.T) {
+	tests := []struct {
+		input   labels.Labels
+		relabel []*Config
+		output  labels.Labels
+	}{
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+				"b": "bar",
+				"c": "baz",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("f(.*)"),
+					TargetLabel:  "d",
+					Separator:    ";",
+					Replacement:  "ch${1}-ch${1}",
+					Action:       Replace,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "foo",
+				"b": "bar",
+				"c": "baz",
+				"d": "choo-choo",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+				"b": "bar",
+				"c": "baz",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a", "b"},
+					Regex:        MustNewRegexp("f(.*);(.*)r"),
+					TargetLabel:  "a",
+					Separator:    ";",
+					Replacement:  "b${1}${2}m", // boobam
+					Action:       Replace,
+				},
+				{
+					SourceLabels: model.LabelNames{"c", "a"},
+					Regex:        MustNewRegexp("(b).*b(.*)ba(.*)"),
+					TargetLabel:  "d",
+					Separator:    ";",
+					Replacement:  "$1$2$2$3",
+					Action:       Replace,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "boobam",
+				"b": "bar",
+				"c": "baz",
+				"d": "boooom",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp(".*o.*"),
+					Action:       Drop,
+				}, {
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("f(.*)"),
+					TargetLabel:  "d",
+					Separator:    ";",
+					Replacement:  "ch$1-ch$1",
+					Action:       Replace,
+				},
+			},
+			output: nil,
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+				"b": "bar",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp(".*o.*"),
+					Action:       Drop,
+				},
+			},
+			output: nil,
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "abc",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp(".*(b).*"),
+					TargetLabel:  "d",
+					Separator:    ";",
+					Replacement:  "$1",
+					Action:       Replace,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "abc",
+				"d": "b",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("no-match"),
+					Action:       Drop,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("f|o"),
+					Action:       Drop,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("no-match"),
+					Action:       Keep,
+				},
+			},
+			output: nil,
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("f.*"),
+					Action:       Keep,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+		},
+		{
+			// No replacement must be applied if there is no match.
+			input: labels.FromMap(map[string]string{
+				"a": "boo",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("f"),
+					TargetLabel:  "b",
+					Replacement:  "bar",
+					Action:       Replace,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "boo",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo",
+				"b": "bar",
+				"c": "baz",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"c"},
+					TargetLabel:  "d",
+					Separator:    ";",
+					Action:       HashMod,
+					Modulus:      1000,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "foo",
+				"b": "bar",
+				"c": "baz",
+				"d": "976",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a": "foo\nbar",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					TargetLabel:  "b",
+					Separator:    ";",
+					Action:       HashMod,
+					Modulus:      1000,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "foo\nbar",
+				"b": "734",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a":  "foo",
+				"b1": "bar",
+				"b2": "baz",
+			}),
+			relabel: []*Config{
+				{
+					Regex:       MustNewRegexp("(b.*)"),
+					Replacement: "bar_${1}",
+					Action:      LabelMap,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a":      "foo",
+				"b1":     "bar",
+				"b2":     "baz",
+				"bar_b1": "bar",
+				"bar_b2": "baz",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a":             "foo",
+				"__meta_my_bar": "aaa",
+				"__meta_my_baz": "bbb",
+				"__meta_other":  "ccc",
+			}),
+			relabel: []*Config{
+				{
+					Regex:       MustNewRegexp("__meta_(my.*)"),
+					Replacement: "${1}",
+					Action:      LabelMap,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a":             "foo",
+				"__meta_my_bar": "aaa",
+				"__meta_my_baz": "bbb",
+				"__meta_other":  "ccc",
+				"my_bar":        "aaa",
+				"my_baz":        "bbb",
+			}),
+		},
+		{ // valid case
+			input: labels.FromMap(map[string]string{
+				"a": "some-name-value",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
+					Action:       Replace,
+					Replacement:  "${2}",
+					TargetLabel:  "${1}",
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a":    "some-name-value",
+				"name": "value",
+			}),
+		},
+		{ // invalid replacement ""
+			input: labels.FromMap(map[string]string{
+				"a": "some-name-value",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
+					Action:       Replace,
+					Replacement:  "${3}",
+					TargetLabel:  "${1}",
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "some-name-value",
+			}),
+		},
+		{ // invalid target_labels
+			input: labels.FromMap(map[string]string{
+				"a": "some-name-value",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
+					Action:       Replace,
+					Replacement:  "${1}",
+					TargetLabel:  "${3}",
+				},
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
+					Action:       Replace,
+					Replacement:  "${1}",
+					TargetLabel:  "0${3}",
+				},
+				{
+					SourceLabels: model.LabelNames{"a"},
+					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
+					Action:       Replace,
+					Replacement:  "${1}",
+					TargetLabel:  "-${3}",
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "some-name-value",
+			}),
+		},
+		{ // more complex real-life like usecase
+			input: labels.FromMap(map[string]string{
+				"__meta_sd_tags": "path:/secret,job:some-job,label:foo=bar",
+			}),
+			relabel: []*Config{
+				{
+					SourceLabels: model.LabelNames{"__meta_sd_tags"},
+					Regex:        MustNewRegexp("(?:.+,|^)path:(/[^,]+).*"),
+					Action:       Replace,
+					Replacement:  "${1}",
+					TargetLabel:  "__metrics_path__",
+				},
+				{
+					SourceLabels: model.LabelNames{"__meta_sd_tags"},
+					Regex:        MustNewRegexp("(?:.+,|^)job:([^,]+).*"),
+					Action:       Replace,
+					Replacement:  "${1}",
+					TargetLabel:  "job",
+				},
+				{
+					SourceLabels: model.LabelNames{"__meta_sd_tags"},
+					Regex:        MustNewRegexp("(?:.+,|^)label:([^=]+)=([^,]+).*"),
+					Action:       Replace,
+					Replacement:  "${2}",
+					TargetLabel:  "${1}",
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"__meta_sd_tags":   "path:/secret,job:some-job,label:foo=bar",
+				"__metrics_path__": "/secret",
+				"job":              "some-job",
+				"foo":              "bar",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a":  "foo",
+				"b1": "bar",
+				"b2": "baz",
+			}),
+			relabel: []*Config{
+				{
+					Regex:  MustNewRegexp("(b.*)"),
+					Action: LabelKeep,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"b1": "bar",
+				"b2": "baz",
+			}),
+		},
+		{
+			input: labels.FromMap(map[string]string{
+				"a":  "foo",
+				"b1": "bar",
+				"b2": "baz",
+			}),
+			relabel: []*Config{
+				{
+					Regex:  MustNewRegexp("(b.*)"),
+					Action: LabelDrop,
+				},
+			},
+			output: labels.FromMap(map[string]string{
+				"a": "foo",
+			}),
+		},
+	}
+
+	for _, test := range tests {
+		// Setting default fields, mimicking the behaviour in Prometheus.
+		for _, cfg := range test.relabel {
+			if cfg.Action == "" {
+				cfg.Action = DefaultRelabelConfig.Action
+			}
+			if cfg.Separator == "" {
+				cfg.Separator = DefaultRelabelConfig.Separator
+			}
+			if cfg.Regex.original == "" {
+				cfg.Regex = DefaultRelabelConfig.Regex
+			}
+			if cfg.Replacement == "" {
+				cfg.Replacement = DefaultRelabelConfig.Replacement
+			}
+		}
+
+		res := Process(test.input, test.relabel...)
+		require.Equal(t, test.output, res)
+	}
+}
+
+// TestTargetLabelValidity exercises the relabelTarget pattern directly:
+// label-name characters plus $1/${1}/${name} capture references are valid;
+// leading/trailing '-' and dangling '$'/'${' are not.
+func TestTargetLabelValidity(t *testing.T) {
+	tests := []struct {
+		str   string
+		valid bool
+	}{
+		{"-label", false},
+		{"label", true},
+		{"label${1}", true},
+		{"${1}label", true},
+		{"${1}", true},
+		{"${1}label", true},
+		{"${", false},
+		{"$", false},
+		{"${}", false},
+		{"foo${", false},
+		{"$1", true},
+		{"asd$2asd", true},
+		{"-foo${1}bar-", false},
+		{"_${1}_", true},
+		{"foo${bar}foo", true},
+	}
+	for _, test := range tests {
+		require.Equal(t, test.valid, relabelTarget.Match([]byte(test.str)),
+			"Expected %q to be %v", test.str, test.valid)
+	}
+}
diff -pruN 2.31.2+ds1-1/model/rulefmt/rulefmt.go 2.33.5+ds1-2/model/rulefmt/rulefmt.go
--- 2.31.2+ds1-1/model/rulefmt/rulefmt.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/rulefmt.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,316 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rulefmt
+
+import (
+	"bytes"
+	"context"
+	"io"
+	"io/ioutil"
+	"strings"
+	"time"
+
+	"github.com/pkg/errors"
+	"github.com/prometheus/common/model"
+	yaml "gopkg.in/yaml.v3"
+
+	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/template"
+)
+
+// Error represents semantic errors on parsing rule groups.
+type Error struct {
+	Group    string
+	Rule     int
+	RuleName string
+	Err      WrappedError
+}
+
+// Error prints the error message in a formatted string.
+func (err *Error) Error() string {
+	if err.Err.nodeAlt != nil {
+		return errors.Wrapf(err.Err.err, "%d:%d: %d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName).Error()
+	} else if err.Err.node != nil {
+		return errors.Wrapf(err.Err.err, "%d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName).Error()
+	}
+	return errors.Wrapf(err.Err.err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error()
+}
+
+// WrappedError wraps error with the yaml node which can be used to represent
+// the line and column numbers of the error.
+type WrappedError struct {
+	err     error
+	node    *yaml.Node
+	nodeAlt *yaml.Node
+}
+
+// Error prints the error message in a formatted string.
+func (we *WrappedError) Error() string {
+	if we.nodeAlt != nil {
+		return errors.Wrapf(we.err, "%d:%d: %d:%d", we.node.Line, we.node.Column, we.nodeAlt.Line, we.nodeAlt.Column).Error()
+	} else if we.node != nil {
+		return errors.Wrapf(we.err, "%d:%d", we.node.Line, we.node.Column).Error()
+	}
+	return we.err.Error()
+}
+
+// RuleGroups is a set of rule groups that are typically exposed in a file.
+type RuleGroups struct {
+	Groups []RuleGroup `yaml:"groups"`
+}
+
+type ruleGroups struct {
+	Groups []yaml.Node `yaml:"groups"`
+}
+
+// Validate validates all rules in the rule groups.
+func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
+	set := map[string]struct{}{}
+
+	for j, g := range g.Groups {
+		if g.Name == "" {
+			errs = append(errs, errors.Errorf("%d:%d: Groupname must not be empty", node.Groups[j].Line, node.Groups[j].Column))
+		}
+
+		if _, ok := set[g.Name]; ok {
+			errs = append(
+				errs,
+				errors.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name),
+			)
+		}
+
+		set[g.Name] = struct{}{}
+
+		for i, r := range g.Rules {
+			for _, node := range g.Rules[i].Validate() {
+				var ruleName yaml.Node
+				if r.Alert.Value != "" {
+					ruleName = r.Alert
+				} else {
+					ruleName = r.Record
+				}
+				errs = append(errs, &Error{
+					Group:    g.Name,
+					Rule:     i + 1,
+					RuleName: ruleName.Value,
+					Err:      node,
+				})
+			}
+		}
+	}
+
+	return errs
+}
+
+// RuleGroup is a list of sequentially evaluated recording and alerting rules.
+type RuleGroup struct {
+	Name     string         `yaml:"name"`
+	Interval model.Duration `yaml:"interval,omitempty"`
+	Limit    int            `yaml:"limit,omitempty"`
+	Rules    []RuleNode     `yaml:"rules"`
+}
+
+// Rule describes an alerting or recording rule.
+type Rule struct {
+	Record      string            `yaml:"record,omitempty"`
+	Alert       string            `yaml:"alert,omitempty"`
+	Expr        string            `yaml:"expr"`
+	For         model.Duration    `yaml:"for,omitempty"`
+	Labels      map[string]string `yaml:"labels,omitempty"`
+	Annotations map[string]string `yaml:"annotations,omitempty"`
+}
+
+// RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules.
+type RuleNode struct {
+	Record      yaml.Node         `yaml:"record,omitempty"`
+	Alert       yaml.Node         `yaml:"alert,omitempty"`
+	Expr        yaml.Node         `yaml:"expr"`
+	For         model.Duration    `yaml:"for,omitempty"`
+	Labels      map[string]string `yaml:"labels,omitempty"`
+	Annotations map[string]string `yaml:"annotations,omitempty"`
+}
+
+// Validate the rule and return a list of encountered errors.
+func (r *RuleNode) Validate() (nodes []WrappedError) {
+	if r.Record.Value != "" && r.Alert.Value != "" {
+		nodes = append(nodes, WrappedError{
+			err:     errors.Errorf("only one of 'record' and 'alert' must be set"),
+			node:    &r.Record,
+			nodeAlt: &r.Alert,
+		})
+	}
+	if r.Record.Value == "" && r.Alert.Value == "" {
+		if r.Record.Value == "0" {
+			nodes = append(nodes, WrappedError{
+				err:  errors.Errorf("one of 'record' or 'alert' must be set"),
+				node: &r.Alert,
+			})
+		} else {
+			nodes = append(nodes, WrappedError{
+				err:  errors.Errorf("one of 'record' or 'alert' must be set"),
+				node: &r.Record,
+			})
+		}
+	}
+
+	if r.Expr.Value == "" {
+		nodes = append(nodes, WrappedError{
+			err:  errors.Errorf("field 'expr' must be set in rule"),
+			node: &r.Expr,
+		})
+	} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
+		nodes = append(nodes, WrappedError{
+			err:  errors.Wrapf(err, "could not parse expression"),
+			node: &r.Expr,
+		})
+	}
+	if r.Record.Value != "" {
+		if len(r.Annotations) > 0 {
+			nodes = append(nodes, WrappedError{
+				err:  errors.Errorf("invalid field 'annotations' in recording rule"),
+				node: &r.Record,
+			})
+		}
+		if r.For != 0 {
+			nodes = append(nodes, WrappedError{
+				err:  errors.Errorf("invalid field 'for' in recording rule"),
+				node: &r.Record,
+			})
+		}
+		if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {
+			nodes = append(nodes, WrappedError{
+				err:  errors.Errorf("invalid recording rule name: %s", r.Record.Value),
+				node: &r.Record,
+			})
+		}
+	}
+
+	for k, v := range r.Labels {
+		if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
+			nodes = append(nodes, WrappedError{
+				err: errors.Errorf("invalid label name: %s", k),
+			})
+		}
+
+		if !model.LabelValue(v).IsValid() {
+			nodes = append(nodes, WrappedError{
+				err: errors.Errorf("invalid label value: %s", v),
+			})
+		}
+	}
+
+	for k := range r.Annotations {
+		if !model.LabelName(k).IsValid() {
+			nodes = append(nodes, WrappedError{
+				err: errors.Errorf("invalid annotation name: %s", k),
+			})
+		}
+	}
+
+	for _, err := range testTemplateParsing(r) {
+		nodes = append(nodes, WrappedError{err: err})
+	}
+
+	return
+}
+
+// testTemplateParsing checks if the templates used in labels and annotations
+// of the alerting rules are parsed correctly.
+func testTemplateParsing(rl *RuleNode) (errs []error) {
+	if rl.Alert.Value == "" {
+		// Not an alerting rule.
+		return errs
+	}
+
+	// Trying to parse templates.
+	tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, "", 0)
+	defs := []string{
+		"{{$labels := .Labels}}",
+		"{{$externalLabels := .ExternalLabels}}",
+		"{{$externalURL := .ExternalURL}}",
+		"{{$value := .Value}}",
+	}
+	parseTest := func(text string) error {
+		tmpl := template.NewTemplateExpander(
+			context.TODO(),
+			strings.Join(append(defs, text), ""),
+			"__alert_"+rl.Alert.Value,
+			tmplData,
+			model.Time(timestamp.FromTime(time.Now())),
+			nil,
+			nil,
+			nil,
+		)
+		return tmpl.ParseTest()
+	}
+
+	// Parsing Labels.
+	for k, val := range rl.Labels {
+		err := parseTest(val)
+		if err != nil {
+			errs = append(errs, errors.Wrapf(err, "label %q", k))
+		}
+	}
+
+	// Parsing Annotations.
+	for k, val := range rl.Annotations {
+		err := parseTest(val)
+		if err != nil {
+			errs = append(errs, errors.Wrapf(err, "annotation %q", k))
+		}
+	}
+
+	return errs
+}
+
+// Parse parses and validates a set of rules.
+func Parse(content []byte) (*RuleGroups, []error) {
+	var (
+		groups RuleGroups
+		node   ruleGroups
+		errs   []error
+	)
+
+	decoder := yaml.NewDecoder(bytes.NewReader(content))
+	decoder.KnownFields(true)
+	err := decoder.Decode(&groups)
+	// Ignore io.EOF which happens with empty input.
+	if err != nil && err != io.EOF {
+		errs = append(errs, err)
+	}
+	err = yaml.Unmarshal(content, &node)
+	if err != nil {
+		errs = append(errs, err)
+	}
+
+	if len(errs) > 0 {
+		return nil, errs
+	}
+
+	return &groups, groups.Validate(node)
+}
+
+// ParseFile reads and parses rules from a file.
+func ParseFile(file string) (*RuleGroups, []error) {
+	b, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, []error{errors.Wrap(err, file)}
+	}
+	rgs, errs := Parse(b)
+	for i := range errs {
+		errs[i] = errors.Wrap(errs[i], file)
+	}
+	return rgs, errs
+}
diff -pruN 2.31.2+ds1-1/model/rulefmt/rulefmt_test.go 2.33.5+ds1-2/model/rulefmt/rulefmt_test.go
--- 2.31.2+ds1-1/model/rulefmt/rulefmt_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/rulefmt_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,301 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rulefmt
+
+import (
+	"errors"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+	"gopkg.in/yaml.v3"
+)
+
+func TestParseFileSuccess(t *testing.T) {
+	_, errs := ParseFile("testdata/test.yaml")
+	require.Empty(t, errs, "unexpected errors parsing file")
+}
+
+func TestParseFileFailure(t *testing.T) {
+	table := []struct {
+		filename string
+		errMsg   string
+	}{
+		{
+			filename: "duplicate_grp.bad.yaml",
+			errMsg:   "groupname: \"yolo\" is repeated in the same file",
+		},
+		{
+			filename: "bad_expr.bad.yaml",
+			errMsg:   "parse error",
+		},
+		{
+			filename: "record_and_alert.bad.yaml",
+			errMsg:   "only one of 'record' and 'alert' must be set",
+		},
+		{
+			filename: "no_rec_alert.bad.yaml",
+			errMsg:   "one of 'record' or 'alert' must be set",
+		},
+		{
+			filename: "noexpr.bad.yaml",
+			errMsg:   "field 'expr' must be set in rule",
+		},
+		{
+			filename: "bad_lname.bad.yaml",
+			errMsg:   "invalid label name",
+		},
+		{
+			filename: "bad_annotation.bad.yaml",
+			errMsg:   "invalid annotation name",
+		},
+		{
+			filename: "invalid_record_name.bad.yaml",
+			errMsg:   "invalid recording rule name",
+		},
+		{
+			filename: "bad_field.bad.yaml",
+			errMsg:   "field annotation not found",
+		},
+		{
+			filename: "invalid_label_name.bad.yaml",
+			errMsg:   "invalid label name",
+		},
+	}
+
+	for _, c := range table {
+		_, errs := ParseFile(filepath.Join("testdata", c.filename))
+		require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename)
+		require.Error(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
+	}
+}
+
+func TestTemplateParsing(t *testing.T) {
+	tests := []struct {
+		ruleString string
+		shouldPass bool
+	}{
+		{
+			ruleString: `
+groups:
+- name: example
+  rules:
+  - alert: InstanceDown
+    expr: up == 0
+    for: 5m
+    labels:
+      severity: "page"
+    annotations:
+      summary: "Instance {{ $labels.instance }} down"
+`,
+			shouldPass: true,
+		},
+		{
+			// `$label` instead of `$labels`.
+			ruleString: `
+groups:
+- name: example
+  rules:
+  - alert: InstanceDown
+    expr: up == 0
+    for: 5m
+    labels:
+      severity: "page"
+    annotations:
+      summary: "Instance {{ $label.instance }} down"
+`,
+			shouldPass: false,
+		},
+		{
+			// `$this_is_wrong`.
+			ruleString: `
+groups:
+- name: example
+  rules:
+  - alert: InstanceDown
+    expr: up == 0
+    for: 5m
+    labels:
+      severity: "{{$this_is_wrong}}"
+    annotations:
+      summary: "Instance {{ $labels.instance }} down"
+`,
+			shouldPass: false,
+		},
+		{
+			// `$labels.quantile * 100`.
+			ruleString: `
+groups:
+- name: example
+  rules:
+  - alert: InstanceDown
+    expr: up == 0
+    for: 5m
+    labels:
+      severity: "page"
+    annotations:
+      summary: "Instance {{ $labels.instance }} down"
+      description: "{{$labels.quantile * 100}}"
+`,
+			shouldPass: false,
+		},
+	}
+
+	for _, tst := range tests {
+		rgs, errs := Parse([]byte(tst.ruleString))
+		require.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString)
+		passed := (tst.shouldPass && len(errs) == 0) || (!tst.shouldPass && len(errs) > 0)
+		require.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString)
+	}
+}
+
+func TestUniqueErrorNodes(t *testing.T) {
+	group := `
+groups:
+- name: example
+  rules:
+  - alert: InstanceDown
+    expr: up ===== 0
+    for: 5m
+    labels:
+      severity: "page"
+    annotations:
+      summary: "Instance {{ $labels.instance }} down"
+  - alert: InstanceUp
+    expr: up ===== 1
+    for: 5m
+    labels:
+      severity: "page"
+    annotations:
+      summary: "Instance {{ $labels.instance }} up"
+`
+	_, errs := Parse([]byte(group))
+	require.Len(t, errs, 2, "Expected two errors")
+	err0 := errs[0].(*Error).Err.node
+	err1 := errs[1].(*Error).Err.node
+	require.NotEqual(t, err0, err1, "Error nodes should not be the same")
+}
+
+func TestError(t *testing.T) {
+	tests := []struct {
+		name  string
+		error *Error
+		want  string
+	}{
+		{
+			name: "with alternative node provided in WrappedError",
+			error: &Error{
+				Group:    "some group",
+				Rule:     1,
+				RuleName: "some rule name",
+				Err: WrappedError{
+					err: errors.New("some error"),
+					node: &yaml.Node{
+						Line:   10,
+						Column: 20,
+					},
+					nodeAlt: &yaml.Node{
+						Line:   11,
+						Column: 21,
+					},
+				},
+			},
+			want: `10:20: 11:21: group "some group", rule 1, "some rule name": some error`,
+		},
+		{
+			name: "with node provided in WrappedError",
+			error: &Error{
+				Group:    "some group",
+				Rule:     1,
+				RuleName: "some rule name",
+				Err: WrappedError{
+					err: errors.New("some error"),
+					node: &yaml.Node{
+						Line:   10,
+						Column: 20,
+					},
+				},
+			},
+			want: `10:20: group "some group", rule 1, "some rule name": some error`,
+		},
+		{
+			name: "with only err provided in WrappedError",
+			error: &Error{
+				Group:    "some group",
+				Rule:     1,
+				RuleName: "some rule name",
+				Err: WrappedError{
+					err: errors.New("some error"),
+				},
+			},
+			want: `group "some group", rule 1, "some rule name": some error`,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := tt.error.Error()
+			require.Equal(t, tt.want, got)
+		})
+	}
+}
+
+func TestWrappedError(t *testing.T) {
+	tests := []struct {
+		name         string
+		wrappedError *WrappedError
+		want         string
+	}{
+		{
+			name: "with alternative node provided",
+			wrappedError: &WrappedError{
+				err: errors.New("some error"),
+				node: &yaml.Node{
+					Line:   10,
+					Column: 20,
+				},
+				nodeAlt: &yaml.Node{
+					Line:   11,
+					Column: 21,
+				},
+			},
+			want: `10:20: 11:21: some error`,
+		},
+		{
+			name: "with node provided",
+			wrappedError: &WrappedError{
+				err: errors.New("some error"),
+				node: &yaml.Node{
+					Line:   10,
+					Column: 20,
+				},
+			},
+			want: `10:20: some error`,
+		},
+		{
+			name: "with only err provided",
+			wrappedError: &WrappedError{
+				err: errors.New("some error"),
+			},
+			want: `some error`,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got := tt.wrappedError.Error()
+			require.Equal(t, tt.want, got)
+		})
+	}
+}
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/bad_annotation.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/bad_annotation.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/bad_annotation.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/bad_annotation.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,7 @@
+groups:
+  - name: yolo
+    rules:
+      - alert: hola
+        expr: 1
+        annotations:
+          ins-tance: localhost
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/bad_expr.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/bad_expr.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/bad_expr.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/bad_expr.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,5 @@
+groups:
+  - name: yolo
+    rules:
+      - record: yolo
+        expr: rate(hi)
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/bad_field.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/bad_field.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/bad_field.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/bad_field.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,9 @@
+groups:
+  - name: yolo
+    rules:
+      - alert: hola
+        expr: 1
+        labels:
+          instance: localhost
+        annotation:
+          summary: annotations is written without s above
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/bad_lname.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/bad_lname.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/bad_lname.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/bad_lname.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,7 @@
+groups:
+  - name: yolo
+    rules:
+      - record: hola
+        expr: 1
+        labels:
+          ins-tance: localhost
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/duplicate_grp.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/duplicate_grp.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/duplicate_grp.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/duplicate_grp.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,3 @@
+groups:
+  - name: yolo
+  - name: yolo
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/invalid_label_name.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/invalid_label_name.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/invalid_label_name.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/invalid_label_name.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,7 @@
+groups:
+  - name: yolo
+    rules:
+      - record: hola
+        expr: 1
+        labels:
+          __name__: anything
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/invalid_record_name.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/invalid_record_name.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/invalid_record_name.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/invalid_record_name.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,5 @@
+groups:
+  - name: yolo
+    rules:
+      - record: strawberry{flavor="sweet"}
+        expr: 1
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/noexpr.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/noexpr.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/noexpr.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/noexpr.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,4 @@
+groups:
+  - name: yolo
+    rules:
+      - record: ylo
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/no_rec_alert.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/no_rec_alert.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/no_rec_alert.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/no_rec_alert.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,4 @@
+groups:
+  - name: yolo
+    rules:
+      - expr: 1
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/record_and_alert.bad.yaml 2.33.5+ds1-2/model/rulefmt/testdata/record_and_alert.bad.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/record_and_alert.bad.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/record_and_alert.bad.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,6 @@
+groups:
+  - name: yolo
+    rules:
+      - record: Hi
+        alert: Hello
+        expr: 1
diff -pruN 2.31.2+ds1-1/model/rulefmt/testdata/test.yaml 2.33.5+ds1-2/model/rulefmt/testdata/test.yaml
--- 2.31.2+ds1-1/model/rulefmt/testdata/test.yaml	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/rulefmt/testdata/test.yaml	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,64 @@
+groups:
+  - name: my-group-name
+    interval: 30s # defaults to global interval
+    rules:
+      - alert: HighErrors
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        for: 5m
+        labels:
+          severity: critical
+        annotations:
+          description: "stuff's happening with {{ $.labels.service }}"
+
+      # Mix recording rules in the same list
+      - record: "new_metric"
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        labels:
+          abc: edf
+          uvw: xyz
+
+      - alert: HighErrors
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        for: 5m
+        labels:
+          severity: critical
+        annotations:
+          description: "stuff's happening with {{ $.labels.service }}"
+
+  - name: my-another-name
+    interval: 30s # defaults to global interval
+    rules:
+      - alert: HighErrors
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        for: 5m
+        labels:
+          severity: critical
+
+      - record: "new_metric"
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+
+      - alert: HighErrors
+        expr: |
+          sum without(instance) (rate(errors_total[5m]))
+          /
+          sum without(instance) (rate(requests_total[5m]))
+        for: 5m
+        labels:
+          severity: critical
+        annotations:
+          description: "stuff's happening with {{ $.labels.service }}"
diff -pruN 2.31.2+ds1-1/model/textparse/interface.go 2.33.5+ds1-2/model/textparse/interface.go
--- 2.31.2+ds1-1/model/textparse/interface.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/interface.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,96 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"mime"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+// Parser parses samples from a byte slice of samples in the official
+// Prometheus and OpenMetrics text exposition formats.
+type Parser interface {
+	// Series returns the bytes of the series, the timestamp if set, and the value
+	// of the current sample.
+	Series() ([]byte, *int64, float64)
+
+	// Help returns the metric name and help text in the current entry.
+	// Must only be called after Next returned a help entry.
+	// The returned byte slices become invalid after the next call to Next.
+	Help() ([]byte, []byte)
+
+	// Type returns the metric name and type in the current entry.
+	// Must only be called after Next returned a type entry.
+	// The returned byte slices become invalid after the next call to Next.
+	Type() ([]byte, MetricType)
+
+	// Unit returns the metric name and unit in the current entry.
+	// Must only be called after Next returned a unit entry.
+	// The returned byte slices become invalid after the next call to Next.
+	Unit() ([]byte, []byte)
+
+	// Comment returns the text of the current comment.
+	// Must only be called after Next returned a comment entry.
+	// The returned byte slice becomes invalid after the next call to Next.
+	Comment() []byte
+
+	// Metric writes the labels of the current sample into the passed labels.
+	// It returns the string from which the metric was parsed.
+	Metric(l *labels.Labels) string
+
+	// Exemplar writes the exemplar of the current sample into the passed
+	// exemplar. It returns if an exemplar exists or not.
+	Exemplar(l *exemplar.Exemplar) bool
+
+	// Next advances the parser to the next sample. It returns false if no
+	// more samples were read or an error occurred.
+	Next() (Entry, error)
+}
+
+// New returns a new parser of the byte slice.
+func New(b []byte, contentType string) Parser {
+	mediaType, _, err := mime.ParseMediaType(contentType)
+	if err == nil && mediaType == "application/openmetrics-text" {
+		return NewOpenMetricsParser(b)
+	}
+	return NewPromParser(b)
+}
+
+// Entry represents the type of a parsed entry.
+type Entry int
+
+const (
+	EntryInvalid Entry = -1
+	EntryType    Entry = 0
+	EntryHelp    Entry = 1
+	EntrySeries  Entry = 2
+	EntryComment Entry = 3
+	EntryUnit    Entry = 4
+)
+
+// MetricType represents metric type values.
+type MetricType string
+
+const (
+	MetricTypeCounter        = MetricType("counter")
+	MetricTypeGauge          = MetricType("gauge")
+	MetricTypeHistogram      = MetricType("histogram")
+	MetricTypeGaugeHistogram = MetricType("gaugehistogram")
+	MetricTypeSummary        = MetricType("summary")
+	MetricTypeInfo           = MetricType("info")
+	MetricTypeStateset       = MetricType("stateset")
+	MetricTypeUnknown        = MetricType("unknown")
+)
diff -pruN 2.31.2+ds1-1/model/textparse/openmetricslex.l 2.33.5+ds1-2/model/textparse/openmetricslex.l
--- 2.31.2+ds1-1/model/textparse/openmetricslex.l	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/openmetricslex.l	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,80 @@
+%{
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+    "fmt"
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
+func (l *openMetricsLexer) Lex() token {
+    if l.i >= len(l.b) {
+        return tEOF
+    }
+    c := l.b[l.i]
+    l.start = l.i
+
+%}
+
+D     [0-9]
+L     [a-zA-Z_]
+M     [a-zA-Z_:]
+C     [^\n]
+S     [ ]
+
+%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp sExemplar sEValue sETimestamp
+
+%yyc c
+%yyn c = l.next()
+%yyt l.state
+
+
+%%
+
+#{S}                                  l.state = sComment
+<sComment>HELP{S}                     l.state = sMeta1; return tHelp
+<sComment>TYPE{S}                     l.state = sMeta1; return tType
+<sComment>UNIT{S}                     l.state = sMeta1; return tUnit
+<sComment>"EOF"\n?                    l.state = sInit; return tEOFWord
+<sMeta1>{M}({M}|{D})*                 l.state = sMeta2; return tMName
+<sMeta2>{S}{C}*\n                     l.state = sInit; return tText
+
+{M}({M}|{D})*                         l.state = sValue; return tMName
+<sValue>\{                            l.state = sLabels; return tBraceOpen
+<sLabels>{L}({L}|{D})*                return tLName
+<sLabels>\}                           l.state = sValue; return tBraceClose
+<sLabels>=                            l.state = sLValue; return tEqual
+<sLabels>,                            return tComma
+<sLValue>\"(\\.|[^\\"\n])*\"          l.state = sLabels; return tLValue
+<sValue>{S}[^ \n]+                    l.state = sTimestamp; return tValue
+<sTimestamp>{S}[^ \n]+                return tTimestamp
+<sTimestamp>\n                        l.state = sInit; return tLinebreak
+<sTimestamp>{S}#{S}\{                 l.state = sExemplar; return tComment
+
+<sExemplar>{L}({L}|{D})*              return tLName
+<sExemplar>\}                         l.state = sEValue; return tBraceClose
+<sExemplar>=                          l.state = sEValue; return tEqual
+<sEValue>\"(\\.|[^\\"\n])*\"          l.state = sExemplar; return tLValue
+<sExemplar>,                          return tComma
+<sEValue>{S}[^ \n]+                   l.state = sETimestamp; return tValue
+<sETimestamp>{S}[^ \n]+               return tTimestamp
+<sETimestamp>\n                       l.state = sInit; return tLinebreak
+
+%%
+
+    return tInvalid
+}
diff -pruN 2.31.2+ds1-1/model/textparse/openmetricslex.l.go 2.33.5+ds1-2/model/textparse/openmetricslex.l.go
--- 2.31.2+ds1-1/model/textparse/openmetricslex.l.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/openmetricslex.l.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,762 @@
+// Code generated by golex. DO NOT EDIT.
+
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"fmt"
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
+func (l *openMetricsLexer) Lex() token {
+	if l.i >= len(l.b) {
+		return tEOF
+	}
+	c := l.b[l.i]
+	l.start = l.i
+
+yystate0:
+
+	switch yyt := l.state; yyt {
+	default:
+		panic(fmt.Errorf(`invalid start condition %d`, yyt))
+	case 0: // start condition: INITIAL
+		goto yystart1
+	case 1: // start condition: sComment
+		goto yystart5
+	case 2: // start condition: sMeta1
+		goto yystart25
+	case 3: // start condition: sMeta2
+		goto yystart27
+	case 4: // start condition: sLabels
+		goto yystart30
+	case 5: // start condition: sLValue
+		goto yystart35
+	case 6: // start condition: sValue
+		goto yystart39
+	case 7: // start condition: sTimestamp
+		goto yystart43
+	case 8: // start condition: sExemplar
+		goto yystart50
+	case 9: // start condition: sEValue
+		goto yystart55
+	case 10: // start condition: sETimestamp
+		goto yystart61
+	}
+
+	goto yystate0 // silence unused label error
+	goto yystate1 // silence unused label error
+yystate1:
+	c = l.next()
+yystart1:
+	switch {
+	default:
+		goto yyabort
+	case c == '#':
+		goto yystate2
+	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate4
+	}
+
+yystate2:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate3
+	}
+
+yystate3:
+	c = l.next()
+	goto yyrule1
+
+yystate4:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule8
+	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate4
+	}
+
+	goto yystate5 // silence unused label error
+yystate5:
+	c = l.next()
+yystart5:
+	switch {
+	default:
+		goto yyabort
+	case c == 'E':
+		goto yystate6
+	case c == 'H':
+		goto yystate10
+	case c == 'T':
+		goto yystate15
+	case c == 'U':
+		goto yystate20
+	}
+
+yystate6:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'O':
+		goto yystate7
+	}
+
+yystate7:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'F':
+		goto yystate8
+	}
+
+yystate8:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule5
+	case c == '\n':
+		goto yystate9
+	}
+
+yystate9:
+	c = l.next()
+	goto yyrule5
+
+yystate10:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'E':
+		goto yystate11
+	}
+
+yystate11:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'L':
+		goto yystate12
+	}
+
+yystate12:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'P':
+		goto yystate13
+	}
+
+yystate13:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate14
+	}
+
+yystate14:
+	c = l.next()
+	goto yyrule2
+
+yystate15:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'Y':
+		goto yystate16
+	}
+
+yystate16:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'P':
+		goto yystate17
+	}
+
+yystate17:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'E':
+		goto yystate18
+	}
+
+yystate18:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate19
+	}
+
+yystate19:
+	c = l.next()
+	goto yyrule3
+
+yystate20:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'N':
+		goto yystate21
+	}
+
+yystate21:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'I':
+		goto yystate22
+	}
+
+yystate22:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'T':
+		goto yystate23
+	}
+
+yystate23:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate24
+	}
+
+yystate24:
+	c = l.next()
+	goto yyrule4
+
+	goto yystate25 // silence unused label error
+yystate25:
+	c = l.next()
+yystart25:
+	switch {
+	default:
+		goto yyabort
+	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate26
+	}
+
+yystate26:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule6
+	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate26
+	}
+
+	goto yystate27 // silence unused label error
+yystate27:
+	c = l.next()
+yystart27:
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate28
+	}
+
+yystate28:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '\n':
+		goto yystate29
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
+		goto yystate28
+	}
+
+yystate29:
+	c = l.next()
+	goto yyrule7
+
+	goto yystate30 // silence unused label error
+yystate30:
+	c = l.next()
+yystart30:
+	switch {
+	default:
+		goto yyabort
+	case c == ',':
+		goto yystate31
+	case c == '=':
+		goto yystate32
+	case c == '}':
+		goto yystate34
+	case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate33
+	}
+
+yystate31:
+	c = l.next()
+	goto yyrule13
+
+yystate32:
+	c = l.next()
+	goto yyrule12
+
+yystate33:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule10
+	case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate33
+	}
+
+yystate34:
+	c = l.next()
+	goto yyrule11
+
+	goto yystate35 // silence unused label error
+yystate35:
+	c = l.next()
+yystart35:
+	switch {
+	default:
+		goto yyabort
+	case c == '"':
+		goto yystate36
+	}
+
+yystate36:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '"':
+		goto yystate37
+	case c == '\\':
+		goto yystate38
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
+		goto yystate36
+	}
+
+yystate37:
+	c = l.next()
+	goto yyrule14
+
+yystate38:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
+		goto yystate36
+	}
+
+	goto yystate39 // silence unused label error
+yystate39:
+	c = l.next()
+yystart39:
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate40
+	case c == '{':
+		goto yystate42
+	}
+
+yystate40:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate41
+	}
+
+yystate41:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule15
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate41
+	}
+
+yystate42:
+	c = l.next()
+	goto yyrule9
+
+	goto yystate43 // silence unused label error
+yystate43:
+	c = l.next()
+yystart43:
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate45
+	case c == '\n':
+		goto yystate44
+	}
+
+yystate44:
+	c = l.next()
+	goto yyrule17
+
+yystate45:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '#':
+		goto yystate47
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c == '!' || c == '"' || c >= '$' && c <= 'ÿ':
+		goto yystate46
+	}
+
+yystate46:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule16
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate46
+	}
+
+yystate47:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule16
+	case c == ' ':
+		goto yystate48
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate46
+	}
+
+yystate48:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '{':
+		goto yystate49
+	}
+
+yystate49:
+	c = l.next()
+	goto yyrule18
+
+	goto yystate50 // silence unused label error
+yystate50:
+	c = l.next()
+yystart50:
+	switch {
+	default:
+		goto yyabort
+	case c == ',':
+		goto yystate51
+	case c == '=':
+		goto yystate52
+	case c == '}':
+		goto yystate54
+	case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate53
+	}
+
+yystate51:
+	c = l.next()
+	goto yyrule23
+
+yystate52:
+	c = l.next()
+	goto yyrule21
+
+yystate53:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule19
+	case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate53
+	}
+
+yystate54:
+	c = l.next()
+	goto yyrule20
+
+	goto yystate55 // silence unused label error
+yystate55:
+	c = l.next()
+yystart55:
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate56
+	case c == '"':
+		goto yystate58
+	}
+
+yystate56:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate57
+	}
+
+yystate57:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule24
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate57
+	}
+
+yystate58:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '"':
+		goto yystate59
+	case c == '\\':
+		goto yystate60
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
+		goto yystate58
+	}
+
+yystate59:
+	c = l.next()
+	goto yyrule22
+
+yystate60:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
+		goto yystate58
+	}
+
+	goto yystate61 // silence unused label error
+yystate61:
+	c = l.next()
+yystart61:
+	switch {
+	default:
+		goto yyabort
+	case c == ' ':
+		goto yystate63
+	case c == '\n':
+		goto yystate62
+	}
+
+yystate62:
+	c = l.next()
+	goto yyrule26
+
+yystate63:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate64
+	}
+
+yystate64:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule25
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate64
+	}
+
+yyrule1: // #{S}
+	{
+		l.state = sComment
+		goto yystate0
+	}
+yyrule2: // HELP{S}
+	{
+		l.state = sMeta1
+		return tHelp
+		goto yystate0
+	}
+yyrule3: // TYPE{S}
+	{
+		l.state = sMeta1
+		return tType
+		goto yystate0
+	}
+yyrule4: // UNIT{S}
+	{
+		l.state = sMeta1
+		return tUnit
+		goto yystate0
+	}
+yyrule5: // "EOF"\n?
+	{
+		l.state = sInit
+		return tEOFWord
+		goto yystate0
+	}
+yyrule6: // {M}({M}|{D})*
+	{
+		l.state = sMeta2
+		return tMName
+		goto yystate0
+	}
+yyrule7: // {S}{C}*\n
+	{
+		l.state = sInit
+		return tText
+		goto yystate0
+	}
+yyrule8: // {M}({M}|{D})*
+	{
+		l.state = sValue
+		return tMName
+		goto yystate0
+	}
+yyrule9: // \{
+	{
+		l.state = sLabels
+		return tBraceOpen
+		goto yystate0
+	}
+yyrule10: // {L}({L}|{D})*
+	{
+		return tLName
+	}
+yyrule11: // \}
+	{
+		l.state = sValue
+		return tBraceClose
+		goto yystate0
+	}
+yyrule12: // =
+	{
+		l.state = sLValue
+		return tEqual
+		goto yystate0
+	}
+yyrule13: // ,
+	{
+		return tComma
+	}
+yyrule14: // \"(\\.|[^\\"\n])*\"
+	{
+		l.state = sLabels
+		return tLValue
+		goto yystate0
+	}
+yyrule15: // {S}[^ \n]+
+	{
+		l.state = sTimestamp
+		return tValue
+		goto yystate0
+	}
+yyrule16: // {S}[^ \n]+
+	{
+		return tTimestamp
+	}
+yyrule17: // \n
+	{
+		l.state = sInit
+		return tLinebreak
+		goto yystate0
+	}
+yyrule18: // {S}#{S}\{
+	{
+		l.state = sExemplar
+		return tComment
+		goto yystate0
+	}
+yyrule19: // {L}({L}|{D})*
+	{
+		return tLName
+	}
+yyrule20: // \}
+	{
+		l.state = sEValue
+		return tBraceClose
+		goto yystate0
+	}
+yyrule21: // =
+	{
+		l.state = sEValue
+		return tEqual
+		goto yystate0
+	}
+yyrule22: // \"(\\.|[^\\"\n])*\"
+	{
+		l.state = sExemplar
+		return tLValue
+		goto yystate0
+	}
+yyrule23: // ,
+	{
+		return tComma
+	}
+yyrule24: // {S}[^ \n]+
+	{
+		l.state = sETimestamp
+		return tValue
+		goto yystate0
+	}
+yyrule25: // {S}[^ \n]+
+	{
+		return tTimestamp
+	}
+yyrule26: // \n
+	{
+		l.state = sInit
+		return tLinebreak
+		goto yystate0
+	}
+	panic("unreachable")
+
+	goto yyabort // silence unused label error
+
+yyabort: // no lexem recognized
+
+	return tInvalid
+}
diff -pruN 2.31.2+ds1-1/model/textparse/openmetricsparse.go 2.33.5+ds1-2/model/textparse/openmetricsparse.go
--- 2.31.2+ds1-1/model/textparse/openmetricsparse.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/openmetricsparse.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,481 @@
+// Copyright 2018 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate go get -u modernc.org/golex
+//go:generate golex -o=openmetricslex.l.go openmetricslex.l
+
+package textparse
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strings"
+	"unicode/utf8"
+
+	"github.com/pkg/errors"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/value"
+)
+
+var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")}
+
+type openMetricsLexer struct {
+	b     []byte
+	i     int
+	start int
+	err   error
+	state int
+}
+
+// buf returns the buffer of the current token.
+func (l *openMetricsLexer) buf() []byte {
+	return l.b[l.start:l.i]
+}
+
+func (l *openMetricsLexer) cur() byte {
+	if l.i < len(l.b) {
+		return l.b[l.i]
+	}
+	return byte(' ')
+}
+
+// next advances the openMetricsLexer to the next character.
+func (l *openMetricsLexer) next() byte {
+	l.i++
+	if l.i >= len(l.b) {
+		l.err = io.EOF
+		return byte(tEOF)
+	}
+	// Lex struggles with null bytes. If we are in a label value or help string, where
+	// they are allowed, consume them here immediately.
+	for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
+		l.i++
+		if l.i >= len(l.b) {
+			l.err = io.EOF
+			return byte(tEOF)
+		}
+	}
+	return l.b[l.i]
+}
+
+func (l *openMetricsLexer) Error(es string) {
+	l.err = errors.New(es)
+}
+
+// OpenMetricsParser parses samples from a byte slice of samples in the official
+// OpenMetrics text exposition format.
+// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit
+type OpenMetricsParser struct {
+	l       *openMetricsLexer
+	series  []byte
+	text    []byte
+	mtype   MetricType
+	val     float64
+	ts      int64
+	hasTS   bool
+	start   int
+	offsets []int
+
+	eOffsets      []int
+	exemplar      []byte
+	exemplarVal   float64
+	exemplarTs    int64
+	hasExemplarTs bool
+}
+
+// NewOpenMetricsParser returns a new parser of the byte slice.
+func NewOpenMetricsParser(b []byte) Parser {
+	return &OpenMetricsParser{l: &openMetricsLexer{b: b}}
+}
+
+// Series returns the bytes of the series, the timestamp if set, and the value
+// of the current sample.
+func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
+	if p.hasTS {
+		ts := p.ts
+		return p.series, &ts, p.val
+	}
+	return p.series, nil, p.val
+}
+
+// Help returns the metric name and help text in the current entry.
+// Must only be called after Next returned a help entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *OpenMetricsParser) Help() ([]byte, []byte) {
+	m := p.l.b[p.offsets[0]:p.offsets[1]]
+
+	// Replacer causes allocations. Replace only when necessary.
+	if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
+		// OpenMetrics always uses the Prometheus format label value escaping.
+		return m, []byte(lvalReplacer.Replace(string(p.text)))
+	}
+	return m, p.text
+}
+
+// Type returns the metric name and type in the current entry.
+// Must only be called after Next returned a type entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *OpenMetricsParser) Type() ([]byte, MetricType) {
+	return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
+}
+
+// Unit returns the metric name and unit in the current entry.
+// Must only be called after Next returned a unit entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *OpenMetricsParser) Unit() ([]byte, []byte) {
+	// p.text holds the unit string captured from the preceding "# UNIT" line.
+	return p.l.b[p.offsets[0]:p.offsets[1]], p.text
+}
+
+// Comment returns the text of the current comment.
+// Must only be called after Next returned a comment entry.
+// The returned byte slice becomes invalid after the next call to Next.
+func (p *OpenMetricsParser) Comment() []byte {
+	return p.text
+}
+
+// Metric writes the labels of the current sample into the passed labels.
+// It returns the string from which the metric was parsed.
+func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
+	// Allocate the full immutable string immediately, so we just
+	// have to create references on it below.
+	s := string(p.series)
+
+	*l = append(*l, labels.Label{
+		Name:  labels.MetricName,
+		Value: s[:p.offsets[0]-p.start],
+	})
+
+	for i := 1; i < len(p.offsets); i += 4 {
+		a := p.offsets[i] - p.start
+		b := p.offsets[i+1] - p.start
+		c := p.offsets[i+2] - p.start
+		d := p.offsets[i+3] - p.start
+
+		// Replacer causes allocations. Replace only when necessary.
+		if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
+			*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
+			continue
+		}
+		*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
+	}
+
+	// Sort labels. We can skip the first entry since the metric name is
+	// already at the right place.
+	sort.Sort((*l)[1:])
+
+	return s
+}
+
+// Exemplar writes the exemplar of the current sample into the passed
+// exemplar. It returns whether an exemplar exists.
+func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
+	if len(p.exemplar) == 0 {
+		return false
+	}
+
+	// Allocate the full immutable string immediately, so we just
+	// have to create references on it below.
+	s := string(p.exemplar)
+
+	e.Value = p.exemplarVal
+	if p.hasExemplarTs {
+		e.HasTs = true
+		e.Ts = p.exemplarTs
+	}
+
+	for i := 0; i < len(p.eOffsets); i += 4 {
+		a := p.eOffsets[i] - p.start
+		b := p.eOffsets[i+1] - p.start
+		c := p.eOffsets[i+2] - p.start
+		d := p.eOffsets[i+3] - p.start
+
+		e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]})
+	}
+
+	// Sort the labels.
+	sort.Sort(e.Labels)
+
+	return true
+}
+
+// nextToken returns the next token from the openMetricsLexer.
+func (p *OpenMetricsParser) nextToken() token {
+	tok := p.l.Lex()
+	return tok
+}
+
+// Next advances the parser to the next sample. It returns false if no
+// more samples were read or an error occurred.
+func (p *OpenMetricsParser) Next() (Entry, error) {
+	var err error
+
+	p.start = p.l.i
+	p.offsets = p.offsets[:0]
+	p.eOffsets = p.eOffsets[:0]
+	p.exemplar = p.exemplar[:0]
+	p.exemplarVal = 0
+	p.hasExemplarTs = false
+
+	switch t := p.nextToken(); t {
+	case tEOFWord:
+		if t := p.nextToken(); t != tEOF {
+			return EntryInvalid, errors.New("unexpected data after # EOF")
+		}
+		return EntryInvalid, io.EOF
+	case tEOF:
+		return EntryInvalid, errors.New("data does not end with # EOF")
+	case tHelp, tType, tUnit:
+		switch t := p.nextToken(); t {
+		case tMName:
+			p.offsets = append(p.offsets, p.l.start, p.l.i)
+		default:
+			return EntryInvalid, parseError("expected metric name after HELP", t)
+		}
+		switch t := p.nextToken(); t {
+		case tText:
+			if len(p.l.buf()) > 1 {
+				p.text = p.l.buf()[1 : len(p.l.buf())-1]
+			} else {
+				p.text = []byte{}
+			}
+		default:
+			return EntryInvalid, parseError("expected text in HELP", t)
+		}
+		switch t {
+		case tType:
+			switch s := yoloString(p.text); s {
+			case "counter":
+				p.mtype = MetricTypeCounter
+			case "gauge":
+				p.mtype = MetricTypeGauge
+			case "histogram":
+				p.mtype = MetricTypeHistogram
+			case "gaugehistogram":
+				p.mtype = MetricTypeGaugeHistogram
+			case "summary":
+				p.mtype = MetricTypeSummary
+			case "info":
+				p.mtype = MetricTypeInfo
+			case "stateset":
+				p.mtype = MetricTypeStateset
+			case "unknown":
+				p.mtype = MetricTypeUnknown
+			default:
+				return EntryInvalid, errors.Errorf("invalid metric type %q", s)
+			}
+		case tHelp:
+			if !utf8.Valid(p.text) {
+				return EntryInvalid, errors.New("help text is not a valid utf8 string")
+			}
+		}
+		switch t {
+		case tHelp:
+			return EntryHelp, nil
+		case tType:
+			return EntryType, nil
+		case tUnit:
+			m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]])
+			u := yoloString(p.text)
+			if len(u) > 0 {
+				if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' {
+					return EntryInvalid, errors.Errorf("unit not a suffix of metric %q", m)
+				}
+			}
+			return EntryUnit, nil
+		}
+
+	case tMName:
+		p.offsets = append(p.offsets, p.l.i)
+		p.series = p.l.b[p.start:p.l.i]
+
+		t2 := p.nextToken()
+		if t2 == tBraceOpen {
+			p.offsets, err = p.parseLVals(p.offsets)
+			if err != nil {
+				return EntryInvalid, err
+			}
+			p.series = p.l.b[p.start:p.l.i]
+			t2 = p.nextToken()
+		}
+		p.val, err = p.getFloatValue(t2, "metric")
+		if err != nil {
+			return EntryInvalid, err
+		}
+
+		p.hasTS = false
+		switch t2 := p.nextToken(); t2 {
+		case tEOF:
+			return EntryInvalid, errors.New("data does not end with # EOF")
+		case tLinebreak:
+			break
+		case tComment:
+			if err := p.parseComment(); err != nil {
+				return EntryInvalid, err
+			}
+		case tTimestamp:
+			p.hasTS = true
+			var ts float64
+			// A float is enough to hold what we need for millisecond resolution.
+			if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
+				return EntryInvalid, err
+			}
+			if math.IsNaN(ts) || math.IsInf(ts, 0) {
+				return EntryInvalid, errors.New("invalid timestamp")
+			}
+			p.ts = int64(ts * 1000)
+			switch t3 := p.nextToken(); t3 {
+			case tLinebreak:
+			case tComment:
+				if err := p.parseComment(); err != nil {
+					return EntryInvalid, err
+				}
+			default:
+				return EntryInvalid, parseError("expected next entry after timestamp", t3)
+			}
+		default:
+			return EntryInvalid, parseError("expected timestamp or # symbol", t2)
+		}
+		return EntrySeries, nil
+
+	default:
+		err = errors.Errorf("%q %q is not a valid start token", t, string(p.l.cur()))
+	}
+	return EntryInvalid, err
+}
+
+func (p *OpenMetricsParser) parseComment() error {
+	// Validate the name of the metric. It must have _total or _bucket as
+	// suffix for exemplars to be supported.
+	if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil {
+		return err
+	}
+
+	var err error
+	// Parse the labels.
+	p.eOffsets, err = p.parseLVals(p.eOffsets)
+	if err != nil {
+		return err
+	}
+	p.exemplar = p.l.b[p.start:p.l.i]
+
+	// Get the value.
+	p.exemplarVal, err = p.getFloatValue(p.nextToken(), "exemplar labels")
+	if err != nil {
+		return err
+	}
+
+	// Read the optional timestamp.
+	p.hasExemplarTs = false
+	switch t2 := p.nextToken(); t2 {
+	case tEOF:
+		return errors.New("data does not end with # EOF")
+	case tLinebreak:
+		break
+	case tTimestamp:
+		p.hasExemplarTs = true
+		var ts float64
+		// A float is enough to hold what we need for millisecond resolution.
+		if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
+			return err
+		}
+		if math.IsNaN(ts) || math.IsInf(ts, 0) {
+			return errors.New("invalid exemplar timestamp")
+		}
+		p.exemplarTs = int64(ts * 1000)
+		switch t3 := p.nextToken(); t3 {
+		case tLinebreak:
+		default:
+			return parseError("expected next entry after exemplar timestamp", t3)
+		}
+	default:
+		return parseError("expected timestamp or comment", t2)
+	}
+	return nil
+}
+
+func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
+	first := true
+	for {
+		t := p.nextToken()
+		switch t {
+		case tBraceClose:
+			return offsets, nil
+		case tComma:
+			if first {
+				return nil, parseError("expected label name or left brace", t)
+			}
+			t = p.nextToken()
+			if t != tLName {
+				return nil, parseError("expected label name", t)
+			}
+		case tLName:
+			if !first {
+				return nil, parseError("expected comma", t)
+			}
+		default:
+			if first {
+				return nil, parseError("expected label name or left brace", t)
+			}
+			return nil, parseError("expected comma or left brace", t)
+
+		}
+		first = false
+		// t is now a label name.
+
+		offsets = append(offsets, p.l.start, p.l.i)
+
+		if t := p.nextToken(); t != tEqual {
+			return nil, parseError("expected equal", t)
+		}
+		if t := p.nextToken(); t != tLValue {
+			return nil, parseError("expected label value", t)
+		}
+		if !utf8.Valid(p.l.buf()) {
+			return nil, errors.New("invalid UTF-8 label value")
+		}
+
+		// The openMetricsLexer ensures the value string is quoted. Strip first
+		// and last character.
+		offsets = append(offsets, p.l.start+1, p.l.i-1)
+	}
+}
+
+func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
+	if t != tValue {
+		return 0, parseError(fmt.Sprintf("expected value after %v", after), t)
+	}
+	val, err := parseFloat(yoloString(p.l.buf()[1:]))
+	if err != nil {
+		return 0, err
+	}
+	// Ensure canonical NaN value.
+	if math.IsNaN(p.exemplarVal) {
+		val = math.Float64frombits(value.NormalNaN)
+	}
+	return val, nil
+}
+
+func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error {
+	for _, suffix := range allowedSuffixes {
+		if bytes.HasSuffix(name, suffix) {
+			return nil
+		}
+	}
+	return fmt.Errorf("metric name %v does not support exemplars", string(name))
+}
diff -pruN 2.31.2+ds1-1/model/textparse/openmetricsparse_test.go 2.33.5+ds1-2/model/textparse/openmetricsparse_test.go
--- 2.31.2+ds1-1/model/textparse/openmetricsparse_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/openmetricsparse_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,612 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"io"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+func TestOpenMetricsParse(t *testing.T) {
+	input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+# UNIT go_gc_duration_seconds seconds
+go_gc_duration_seconds{quantile="0"} 4.9351e-05
+go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
+go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
+# HELP nohelp1 
+# HELP help2 escape \ \n \\ \" \x chars
+# UNIT nounit 
+go_gc_duration_seconds{quantile="1.0",a="b"} 8.3835e-05
+go_gc_duration_seconds_count 99
+some:aggregate:rate5m{a_b="c"} 1
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 33 123.123
+# TYPE hh histogram
+hh_bucket{le="+Inf"} 1
+# TYPE gh gaugehistogram
+gh_bucket{le="+Inf"} 1
+# TYPE hhh histogram
+hhh_bucket{le="+Inf"} 1 # {aa="bb"} 4
+# TYPE ggh gaugehistogram
+ggh_bucket{le="+Inf"} 1 # {cc="dd",xx="yy"} 4 123.123
+# TYPE ii info
+ii{foo="bar"} 1
+# TYPE ss stateset
+ss{ss="foo"} 1
+ss{ss="bar"} 0
+# TYPE un unknown
+_metric_starting_with_underscore 1
+testmetric{_label_starting_with_underscore="foo"} 1
+testmetric{label="\"bar\""} 1
+# TYPE foo counter
+foo_total 17.0 1520879607.789 # {xx="yy"} 5`
+
+	input += "\n# HELP metric foo\x00bar"
+	input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
+	input += "\n# EOF\n"
+
+	int64p := func(x int64) *int64 { return &x }
+
+	exp := []struct {
+		lset    labels.Labels
+		m       string
+		t       *int64
+		v       float64
+		typ     MetricType
+		help    string
+		unit    string
+		comment string
+		e       *exemplar.Exemplar
+	}{
+		{
+			m:    "go_gc_duration_seconds",
+			help: "A summary of the GC invocation durations.",
+		}, {
+			m:   "go_gc_duration_seconds",
+			typ: MetricTypeSummary,
+		}, {
+			m:    "go_gc_duration_seconds",
+			unit: "seconds",
+		}, {
+			m:    `go_gc_duration_seconds{quantile="0"}`,
+			v:    4.9351e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
+		}, {
+			m:    `go_gc_duration_seconds{quantile="0.25"}`,
+			v:    7.424100000000001e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
+		}, {
+			m:    `go_gc_duration_seconds{quantile="0.5",a="b"}`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
+		}, {
+			m:    "nohelp1",
+			help: "",
+		}, {
+			m:    "help2",
+			help: "escape \\ \n \\ \" \\x chars",
+		}, {
+			m:    "nounit",
+			unit: "",
+		}, {
+			m:    `go_gc_duration_seconds{quantile="1.0",a="b"}`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
+		}, {
+			m:    `go_gc_duration_seconds_count`,
+			v:    99,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
+		}, {
+			m:    `some:aggregate:rate5m{a_b="c"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
+		}, {
+			m:    "go_goroutines",
+			help: "Number of goroutines that currently exist.",
+		}, {
+			m:   "go_goroutines",
+			typ: MetricTypeGauge,
+		}, {
+			m:    `go_goroutines`,
+			v:    33,
+			t:    int64p(123123),
+			lset: labels.FromStrings("__name__", "go_goroutines"),
+		}, {
+			m:   "hh",
+			typ: MetricTypeHistogram,
+		}, {
+			m:    `hh_bucket{le="+Inf"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"),
+		}, {
+			m:   "gh",
+			typ: MetricTypeGaugeHistogram,
+		}, {
+			m:    `gh_bucket{le="+Inf"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
+		}, {
+			m:   "hhh",
+			typ: MetricTypeHistogram,
+		}, {
+			m:    `hhh_bucket{le="+Inf"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"),
+			e:    &exemplar.Exemplar{Labels: labels.FromStrings("aa", "bb"), Value: 4},
+		}, {
+			m:   "ggh",
+			typ: MetricTypeGaugeHistogram,
+		}, {
+			m:    `ggh_bucket{le="+Inf"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
+			e:    &exemplar.Exemplar{Labels: labels.FromStrings("cc", "dd", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
+		}, {
+			m:   "ii",
+			typ: MetricTypeInfo,
+		}, {
+			m:    `ii{foo="bar"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "ii", "foo", "bar"),
+		}, {
+			m:   "ss",
+			typ: MetricTypeStateset,
+		}, {
+			m:    `ss{ss="foo"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "ss", "ss", "foo"),
+		}, {
+			m:    `ss{ss="bar"}`,
+			v:    0,
+			lset: labels.FromStrings("__name__", "ss", "ss", "bar"),
+		}, {
+			m:   "un",
+			typ: MetricTypeUnknown,
+		}, {
+			m:    "_metric_starting_with_underscore",
+			v:    1,
+			lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
+		}, {
+			m:    "testmetric{_label_starting_with_underscore=\"foo\"}",
+			v:    1,
+			lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
+		}, {
+			m:    "testmetric{label=\"\\\"bar\\\"\"}",
+			v:    1,
+			lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
+		}, {
+			m:   "foo",
+			typ: MetricTypeCounter,
+		}, {
+			m:    "foo_total",
+			v:    17,
+			lset: labels.FromStrings("__name__", "foo_total"),
+			t:    int64p(1520879607789),
+			e:    &exemplar.Exemplar{Labels: labels.FromStrings("xx", "yy"), Value: 5},
+		}, {
+			m:    "metric",
+			help: "foo\x00bar",
+		}, {
+			m:    "null_byte_metric{a=\"abc\x00\"}",
+			v:    1,
+			lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
+		},
+	}
+
+	p := NewOpenMetricsParser([]byte(input))
+	i := 0
+
+	var res labels.Labels
+
+	for {
+		et, err := p.Next()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(t, err)
+
+		switch et {
+		case EntrySeries:
+			m, ts, v := p.Series()
+
+			var e exemplar.Exemplar
+			p.Metric(&res)
+			found := p.Exemplar(&e)
+			require.Equal(t, exp[i].m, string(m))
+			if e.HasTs {
+				require.Equal(t, exp[i].t, ts)
+			}
+			require.Equal(t, exp[i].v, v)
+			require.Equal(t, exp[i].lset, res)
+			if exp[i].e == nil {
+				require.Equal(t, false, found)
+			} else {
+				require.Equal(t, true, found)
+				require.Equal(t, *exp[i].e, e)
+			}
+			res = res[:0]
+
+		case EntryType:
+			m, typ := p.Type()
+			require.Equal(t, exp[i].m, string(m))
+			require.Equal(t, exp[i].typ, typ)
+
+		case EntryHelp:
+			m, h := p.Help()
+			require.Equal(t, exp[i].m, string(m))
+			require.Equal(t, exp[i].help, string(h))
+
+		case EntryUnit:
+			m, u := p.Unit()
+			require.Equal(t, exp[i].m, string(m))
+			require.Equal(t, exp[i].unit, string(u))
+
+		case EntryComment:
+			require.Equal(t, exp[i].comment, string(p.Comment()))
+		}
+
+		i++
+	}
+	require.Equal(t, len(exp), i)
+}
+
+func TestOpenMetricsParseErrors(t *testing.T) {
+	cases := []struct {
+		input string
+		err   string
+	}{
+		// Happy cases. EOF is returned by the parser at the end of valid
+		// data.
+		{
+			input: "# EOF",
+			err:   "EOF",
+		},
+		{
+			input: "# EOF\n",
+			err:   "EOF",
+		},
+		// Unhappy cases.
+		{
+			input: "",
+			err:   "data does not end with # EOF",
+		},
+		{
+			input: "\n",
+			err:   "\"INVALID\" \"\\n\" is not a valid start token",
+		},
+		{
+			input: "metric",
+			err:   "expected value after metric, got \"EOF\"",
+		},
+		{
+			input: "metric 1",
+			err:   "data does not end with # EOF",
+		},
+		{
+			input: "metric 1\n",
+			err:   "data does not end with # EOF",
+		},
+		{
+			input: "metric_total 1 # {aa=\"bb\"} 4",
+			err:   "data does not end with # EOF",
+		},
+		{
+			input: "a\n#EOF\n",
+			err:   "expected value after metric, got \"INVALID\"",
+		},
+		{
+			input: "\n\n#EOF\n",
+			err:   "\"INVALID\" \"\\n\" is not a valid start token",
+		},
+		{
+			input: " a 1\n#EOF\n",
+			err:   "\"INVALID\" \" \" is not a valid start token",
+		},
+		{
+			input: "9\n#EOF\n",
+			err:   "\"INVALID\" \"9\" is not a valid start token",
+		},
+		{
+			input: "# TYPE u untyped\n#EOF\n",
+			err:   "invalid metric type \"untyped\"",
+		},
+		{
+			input: "# TYPE c counter \n#EOF\n",
+			err:   "invalid metric type \"counter \"",
+		},
+		{
+			input: "#  TYPE c counter\n#EOF\n",
+			err:   "\"INVALID\" \" \" is not a valid start token",
+		},
+		{
+			input: "# UNIT metric suffix\n#EOF\n",
+			err:   "unit not a suffix of metric \"metric\"",
+		},
+		{
+			input: "# UNIT metricsuffix suffix\n#EOF\n",
+			err:   "unit not a suffix of metric \"metricsuffix\"",
+		},
+		{
+			input: "# UNIT m suffix\n#EOF\n",
+			err:   "unit not a suffix of metric \"m\"",
+		},
+		{
+			input: "# HELP m\n#EOF\n",
+			err:   "expected text in HELP, got \"INVALID\"",
+		},
+		{
+			input: "a\t1\n#EOF\n",
+			err:   "expected value after metric, got \"INVALID\"",
+		},
+		{
+			input: "a 1\t2\n#EOF\n",
+			err:   "strconv.ParseFloat: parsing \"1\\t2\": invalid syntax",
+		},
+		{
+			input: "a 1 2 \n#EOF\n",
+			err:   "expected next entry after timestamp, got \"INVALID\"",
+		},
+		{
+			input: "a 1 2 #\n#EOF\n",
+			err:   "expected next entry after timestamp, got \"TIMESTAMP\"",
+		},
+		{
+			input: "a 1 1z\n#EOF\n",
+			err:   "strconv.ParseFloat: parsing \"1z\": invalid syntax",
+		},
+		{
+			input: " # EOF\n",
+			err:   "\"INVALID\" \" \" is not a valid start token",
+		},
+		{
+			input: "# EOF\na 1",
+			err:   "unexpected data after # EOF",
+		},
+		{
+			input: "# EOF\n\n",
+			err:   "unexpected data after # EOF",
+		},
+		{
+			input: "# EOFa 1",
+			err:   "unexpected data after # EOF",
+		},
+		{
+			input: "#\tTYPE c counter\n",
+			err:   "\"INVALID\" \"\\t\" is not a valid start token",
+		},
+		{
+			input: "# TYPE c  counter\n",
+			err:   "invalid metric type \" counter\"",
+		},
+		{
+			input: "a 1 1 1\n# EOF\n",
+			err:   "expected next entry after timestamp, got \"TIMESTAMP\"",
+		},
+		{
+			input: "a{b='c'} 1\n# EOF\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "a{b=\"c\",} 1\n# EOF\n",
+			err:   "expected label name, got \"BCLOSE\"",
+		},
+		{
+			input: "a{,b=\"c\"} 1\n# EOF\n",
+			err:   "expected label name or left brace, got \"COMMA\"",
+		},
+		{
+			input: "a{b=\"c\"d=\"e\"} 1\n# EOF\n",
+			err:   "expected comma, got \"LNAME\"",
+		},
+		{
+			input: "a{b=\"c\",,d=\"e\"} 1\n# EOF\n",
+			err:   "expected label name, got \"COMMA\"",
+		},
+		{
+			input: "a{b=\n# EOF\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "a{\xff=\"foo\"} 1\n# EOF\n",
+			err:   "expected label name or left brace, got \"INVALID\"",
+		},
+		{
+			input: "a{b=\"\xff\"} 1\n# EOF\n",
+			err:   "invalid UTF-8 label value",
+		},
+		{
+			input: "a true\n",
+			err:   "strconv.ParseFloat: parsing \"true\": invalid syntax",
+		},
+		{
+			input: "something_weird{problem=\"\n# EOF\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "empty_label_name{=\"\"} 0\n# EOF\n",
+			err:   "expected label name or left brace, got \"EQUAL\"",
+		},
+		{
+			input: "foo 1_2\n\n# EOF\n",
+			err:   "unsupported character in float",
+		},
+		{
+			input: "foo 0x1p-3\n\n# EOF\n",
+			err:   "unsupported character in float",
+		},
+		{
+			input: "foo 0x1P-3\n\n# EOF\n",
+			err:   "unsupported character in float",
+		},
+		{
+			input: "foo 0 1_2\n\n# EOF\n",
+			err:   "unsupported character in float",
+		},
+		{
+			input: "custom_metric_total 1 # {aa=bb}\n# EOF\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "custom_metric_total 1 # {aa=\"bb\"}\n# EOF\n",
+			err:   "expected value after exemplar labels, got \"INVALID\"",
+		},
+		{
+			input: `custom_metric_total 1 # {aa="bb"}`,
+			err:   "expected value after exemplar labels, got \"EOF\"",
+		},
+		{
+			input: `custom_metric 1 # {aa="bb"}`,
+			err:   "metric name custom_metric does not support exemplars",
+		},
+		{
+			input: `custom_metric_total 1 # {aa="bb",,cc="dd"} 1`,
+			err:   "expected label name, got \"COMMA\"",
+		},
+		{
+			input: `custom_metric_total 1 # {aa="bb"} 1_2`,
+			err:   "unsupported character in float",
+		},
+		{
+			input: `custom_metric_total 1 # {aa="bb"} 0x1p-3`,
+			err:   "unsupported character in float",
+		},
+		{
+			input: `custom_metric_total 1 # {aa="bb"} true`,
+			err:   "strconv.ParseFloat: parsing \"true\": invalid syntax",
+		},
+		{
+			input: `custom_metric_total 1 # {aa="bb",cc=}`,
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: `custom_metric_total 1 # {aa=\"\xff\"} 9.0`,
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: `{b="c",} 1`,
+			err:   `"INVALID" "{" is not a valid start token`,
+		},
+		{
+			input: `a 1 NaN`,
+			err:   `invalid timestamp`,
+		},
+		{
+			input: `a 1 -Inf`,
+			err:   `invalid timestamp`,
+		},
+		{
+			input: `a 1 Inf`,
+			err:   `invalid timestamp`,
+		},
+		{
+			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 NaN",
+			err:   `invalid exemplar timestamp`,
+		},
+		{
+			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
+			err:   `invalid exemplar timestamp`,
+		},
+		{
+			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
+			err:   `invalid exemplar timestamp`,
+		},
+	}
+
+	for i, c := range cases {
+		p := NewOpenMetricsParser([]byte(c.input))
+		var err error
+		for err == nil {
+			_, err = p.Next()
+		}
+		require.Equal(t, c.err, err.Error(), "test %d: %s", i, c.input)
+	}
+}
+
+func TestOMNullByteHandling(t *testing.T) {
+	cases := []struct {
+		input string
+		err   string
+	}{
+		{
+			input: "null_byte_metric{a=\"abc\x00\"} 1\n# EOF\n",
+			err:   "",
+		},
+		{
+			input: "a{b=\"\x00ss\"} 1\n# EOF\n",
+			err:   "",
+		},
+		{
+			input: "a{b=\"\x00\"} 1\n# EOF\n",
+			err:   "",
+		},
+		{
+			input: "a{b=\"\x00\"} 1\n# EOF",
+			err:   "",
+		},
+		{
+			input: "a{b=\x00\"ssss\"} 1\n# EOF\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "a{b=\"\x00",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "a{b\x00=\"hiih\"}	1",
+			err: "expected equal, got \"INVALID\"",
+		},
+		{
+			input: "a\x00{b=\"ddd\"} 1",
+			err:   "expected value after metric, got \"INVALID\"",
+		},
+		{
+			input: "#",
+			err:   "\"INVALID\" \" \" is not a valid start token",
+		},
+		{
+			input: "# H",
+			err:   "\"INVALID\" \" \" is not a valid start token",
+		},
+		{
+			input: "custom_metric_total 1 # {b=\x00\"ssss\"} 1\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "custom_metric_total 1 # {b=\"\x00ss\"} 1\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+	}
+
+	for i, c := range cases {
+		p := NewOpenMetricsParser([]byte(c.input))
+		var err error
+		for err == nil {
+			_, err = p.Next()
+		}
+
+		if c.err == "" {
+			require.Equal(t, io.EOF, err, "test %d", i)
+			continue
+		}
+
+		require.Equal(t, c.err, err.Error(), "test %d", i)
+	}
+}
diff -pruN 2.31.2+ds1-1/model/textparse/promlex.l 2.33.5+ds1-2/model/textparse/promlex.l
--- 2.31.2+ds1-1/model/textparse/promlex.l	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/promlex.l	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,100 @@
+%{
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+    "fmt"
+)
+
+const (
+    sInit = iota
+    sComment
+    sMeta1
+    sMeta2
+    sLabels
+    sLValue
+    sValue
+    sTimestamp
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
+func (l *promlexer) Lex() token {
+    if l.i >= len(l.b) {
+        return tEOF
+    }
+    c := l.b[l.i]
+    l.start = l.i
+
+%}
+
+D     [0-9]
+L     [a-zA-Z_]
+M     [a-zA-Z_:]
+C     [^\n]
+
+%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp
+
+%yyc c
+%yyn c = l.next()
+%yyt l.state
+
+
+%%
+
+\0                                    return tEOF
+\n                                    l.state = sInit; return tLinebreak
+<*>[ \t]+                             return tWhitespace
+
+#[ \t]+                               l.state = sComment
+#                                     return l.consumeComment()
+<sComment>HELP[\t ]+                  l.state = sMeta1; return tHelp
+<sComment>TYPE[\t ]+                  l.state = sMeta1; return tType
+<sMeta1>{M}({M}|{D})*                 l.state = sMeta2; return tMName
+<sMeta2>{C}*                          l.state = sInit; return tText
+
+{M}({M}|{D})*                         l.state = sValue; return tMName
+<sValue>\{                            l.state = sLabels; return tBraceOpen
+<sLabels>{L}({L}|{D})*                return tLName
+<sLabels>\}                           l.state = sValue; return tBraceClose
+<sLabels>=                            l.state = sLValue; return tEqual
+<sLabels>,                            return tComma
+<sLValue>\"(\\.|[^\\"])*\"            l.state = sLabels; return tLValue
+<sValue>[^{ \t\n]+                    l.state = sTimestamp; return tValue
+<sTimestamp>{D}+                      return tTimestamp
+<sTimestamp>\n                        l.state = sInit; return tLinebreak
+
+%%
+    // Workaround to gobble up comments that started with a HELP or TYPE
+    // prefix. We just consume all characters until we reach a newline.
+    // This saves us from adding disproportionate complexity to the parser.
+    if l.state == sComment {
+        return l.consumeComment()
+    }
+    return tInvalid
+}
+
+func (l *promlexer) consumeComment() token {
+    for c := l.cur(); ; c = l.next() {
+        switch c {
+        case 0:
+            return tEOF
+        case '\n':
+            l.state = sInit
+            return tComment
+        }
+    }
+}
diff -pruN 2.31.2+ds1-1/model/textparse/promlex.l.go 2.33.5+ds1-2/model/textparse/promlex.l.go
--- 2.31.2+ds1-1/model/textparse/promlex.l.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/promlex.l.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,553 @@
+// CAUTION: Generated file - DO NOT EDIT.
+
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"github.com/pkg/errors"
+)
+
+const (
+	sInit = iota
+	sComment
+	sMeta1
+	sMeta2
+	sLabels
+	sLValue
+	sValue
+	sTimestamp
+	sExemplar
+	sEValue
+	sETimestamp
+)
+
+// Lex is called by the parser generated by "go tool yacc" to obtain each
+// token. The method is opened before the matching rules block and closed at
+// the end of the file.
+func (l *promlexer) Lex() token {
+	if l.i >= len(l.b) {
+		return tEOF
+	}
+	c := l.b[l.i]
+	l.start = l.i
+
+yystate0:
+
+	switch yyt := l.state; yyt {
+	default:
+		panic(errors.Errorf(`invalid start condition %d`, yyt))
+	case 0: // start condition: INITIAL
+		goto yystart1
+	case 1: // start condition: sComment
+		goto yystart8
+	case 2: // start condition: sMeta1
+		goto yystart19
+	case 3: // start condition: sMeta2
+		goto yystart21
+	case 4: // start condition: sLabels
+		goto yystart24
+	case 5: // start condition: sLValue
+		goto yystart29
+	case 6: // start condition: sValue
+		goto yystart33
+	case 7: // start condition: sTimestamp
+		goto yystart36
+	}
+
+	goto yystate0 // silence unused label error
+	goto yystate1 // silence unused label error
+yystate1:
+	c = l.next()
+yystart1:
+	switch {
+	default:
+		goto yyabort
+	case c == '#':
+		goto yystate5
+	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate7
+	case c == '\n':
+		goto yystate4
+	case c == '\t' || c == ' ':
+		goto yystate3
+	case c == '\x00':
+		goto yystate2
+	}
+
+yystate2:
+	c = l.next()
+	goto yyrule1
+
+yystate3:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule3
+	case c == '\t' || c == ' ':
+		goto yystate3
+	}
+
+yystate4:
+	c = l.next()
+	goto yyrule2
+
+yystate5:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule5
+	case c == '\t' || c == ' ':
+		goto yystate6
+	}
+
+yystate6:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule4
+	case c == '\t' || c == ' ':
+		goto yystate6
+	}
+
+yystate7:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule10
+	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate7
+	}
+
+	goto yystate8 // silence unused label error
+yystate8:
+	c = l.next()
+yystart8:
+	switch {
+	default:
+		goto yyabort
+	case c == 'H':
+		goto yystate9
+	case c == 'T':
+		goto yystate14
+	case c == '\t' || c == ' ':
+		goto yystate3
+	}
+
+yystate9:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'E':
+		goto yystate10
+	}
+
+yystate10:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'L':
+		goto yystate11
+	}
+
+yystate11:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'P':
+		goto yystate12
+	}
+
+yystate12:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '\t' || c == ' ':
+		goto yystate13
+	}
+
+yystate13:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule6
+	case c == '\t' || c == ' ':
+		goto yystate13
+	}
+
+yystate14:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'Y':
+		goto yystate15
+	}
+
+yystate15:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'P':
+		goto yystate16
+	}
+
+yystate16:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == 'E':
+		goto yystate17
+	}
+
+yystate17:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '\t' || c == ' ':
+		goto yystate18
+	}
+
+yystate18:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule7
+	case c == '\t' || c == ' ':
+		goto yystate18
+	}
+
+	goto yystate19 // silence unused label error
+yystate19:
+	c = l.next()
+yystart19:
+	switch {
+	default:
+		goto yyabort
+	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate20
+	case c == '\t' || c == ' ':
+		goto yystate3
+	}
+
+yystate20:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule8
+	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate20
+	}
+
+	goto yystate21 // silence unused label error
+yystate21:
+	c = l.next()
+yystart21:
+	switch {
+	default:
+		goto yyrule9
+	case c == '\t' || c == ' ':
+		goto yystate23
+	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate22
+	}
+
+yystate22:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule9
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
+		goto yystate22
+	}
+
+yystate23:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule3
+	case c == '\t' || c == ' ':
+		goto yystate23
+	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
+		goto yystate22
+	}
+
+	goto yystate24 // silence unused label error
+yystate24:
+	c = l.next()
+yystart24:
+	switch {
+	default:
+		goto yyabort
+	case c == ',':
+		goto yystate25
+	case c == '=':
+		goto yystate26
+	case c == '\t' || c == ' ':
+		goto yystate3
+	case c == '}':
+		goto yystate28
+	case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate27
+	}
+
+yystate25:
+	c = l.next()
+	goto yyrule15
+
+yystate26:
+	c = l.next()
+	goto yyrule14
+
+yystate27:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule12
+	case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
+		goto yystate27
+	}
+
+yystate28:
+	c = l.next()
+	goto yyrule13
+
+	goto yystate29 // silence unused label error
+yystate29:
+	c = l.next()
+yystart29:
+	switch {
+	default:
+		goto yyabort
+	case c == '"':
+		goto yystate30
+	case c == '\t' || c == ' ':
+		goto yystate3
+	}
+
+yystate30:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c == '"':
+		goto yystate31
+	case c == '\\':
+		goto yystate32
+	case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
+		goto yystate30
+	}
+
+yystate31:
+	c = l.next()
+	goto yyrule16
+
+yystate32:
+	c = l.next()
+	switch {
+	default:
+		goto yyabort
+	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
+		goto yystate30
+	}
+
+	goto yystate33 // silence unused label error
+yystate33:
+	c = l.next()
+yystart33:
+	switch {
+	default:
+		goto yyabort
+	case c == '\t' || c == ' ':
+		goto yystate3
+	case c == '{':
+		goto yystate35
+	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
+		goto yystate34
+	}
+
+yystate34:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule17
+	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
+		goto yystate34
+	}
+
+yystate35:
+	c = l.next()
+	goto yyrule11
+
+	goto yystate36 // silence unused label error
+yystate36:
+	c = l.next()
+yystart36:
+	switch {
+	default:
+		goto yyabort
+	case c == '\n':
+		goto yystate37
+	case c == '\t' || c == ' ':
+		goto yystate3
+	case c >= '0' && c <= '9':
+		goto yystate38
+	}
+
+yystate37:
+	c = l.next()
+	goto yyrule19
+
+yystate38:
+	c = l.next()
+	switch {
+	default:
+		goto yyrule18
+	case c >= '0' && c <= '9':
+		goto yystate38
+	}
+
+yyrule1: // \0
+	{
+		return tEOF
+	}
+yyrule2: // \n
+	{
+		l.state = sInit
+		return tLinebreak
+		goto yystate0
+	}
+yyrule3: // [ \t]+
+	{
+		return tWhitespace
+	}
+yyrule4: // #[ \t]+
+	{
+		l.state = sComment
+		goto yystate0
+	}
+yyrule5: // #
+	{
+		return l.consumeComment()
+	}
+yyrule6: // HELP[\t ]+
+	{
+		l.state = sMeta1
+		return tHelp
+		goto yystate0
+	}
+yyrule7: // TYPE[\t ]+
+	{
+		l.state = sMeta1
+		return tType
+		goto yystate0
+	}
+yyrule8: // {M}({M}|{D})*
+	{
+		l.state = sMeta2
+		return tMName
+		goto yystate0
+	}
+yyrule9: // {C}*
+	{
+		l.state = sInit
+		return tText
+		goto yystate0
+	}
+yyrule10: // {M}({M}|{D})*
+	{
+		l.state = sValue
+		return tMName
+		goto yystate0
+	}
+yyrule11: // \{
+	{
+		l.state = sLabels
+		return tBraceOpen
+		goto yystate0
+	}
+yyrule12: // {L}({L}|{D})*
+	{
+		return tLName
+	}
+yyrule13: // \}
+	{
+		l.state = sValue
+		return tBraceClose
+		goto yystate0
+	}
+yyrule14: // =
+	{
+		l.state = sLValue
+		return tEqual
+		goto yystate0
+	}
+yyrule15: // ,
+	{
+		return tComma
+	}
+yyrule16: // \"(\\.|[^\\"])*\"
+	{
+		l.state = sLabels
+		return tLValue
+		goto yystate0
+	}
+yyrule17: // [^{ \t\n]+
+	{
+		l.state = sTimestamp
+		return tValue
+		goto yystate0
+	}
+yyrule18: // {D}+
+	{
+		return tTimestamp
+	}
+yyrule19: // \n
+	{
+		l.state = sInit
+		return tLinebreak
+		goto yystate0
+	}
+	panic("unreachable")
+
+	goto yyabort // silence unused label error
+
+yyabort: // no lexem recognized
+	// Workaround to gobble up comments that started with a HELP or TYPE
+	// prefix. We just consume all characters until we reach a newline.
+	// This saves us from adding disproportionate complexity to the parser.
+	if l.state == sComment {
+		return l.consumeComment()
+	}
+	return tInvalid
+}
+
+func (l *promlexer) consumeComment() token {
+	for c := l.cur(); ; c = l.next() {
+		switch c {
+		case 0:
+			return tEOF
+		case '\n':
+			l.state = sInit
+			return tComment
+		}
+	}
+}
diff -pruN 2.31.2+ds1-1/model/textparse/promparse.go 2.33.5+ds1-2/model/textparse/promparse.go
--- 2.31.2+ds1-1/model/textparse/promparse.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/promparse.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,426 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate go get -u modernc.org/golex
+//go:generate golex -o=promlex.l.go promlex.l
+
+package textparse
+
+import (
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+	"unsafe"
+
+	"github.com/pkg/errors"
+
+	"github.com/prometheus/prometheus/model/exemplar"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/value"
+)
+
+type promlexer struct {
+	b     []byte
+	i     int
+	start int
+	err   error
+	state int
+}
+
+type token int
+
+const (
+	tInvalid   token = -1
+	tEOF       token = 0
+	tLinebreak token = iota
+	tWhitespace
+	tHelp
+	tType
+	tUnit
+	tEOFWord
+	tText
+	tComment
+	tBlank
+	tMName
+	tBraceOpen
+	tBraceClose
+	tLName
+	tLValue
+	tComma
+	tEqual
+	tTimestamp
+	tValue
+)
+
+func (t token) String() string {
+	switch t {
+	case tInvalid:
+		return "INVALID"
+	case tEOF:
+		return "EOF"
+	case tLinebreak:
+		return "LINEBREAK"
+	case tWhitespace:
+		return "WHITESPACE"
+	case tHelp:
+		return "HELP"
+	case tType:
+		return "TYPE"
+	case tUnit:
+		return "UNIT"
+	case tEOFWord:
+		return "EOFWORD"
+	case tText:
+		return "TEXT"
+	case tComment:
+		return "COMMENT"
+	case tBlank:
+		return "BLANK"
+	case tMName:
+		return "MNAME"
+	case tBraceOpen:
+		return "BOPEN"
+	case tBraceClose:
+		return "BCLOSE"
+	case tLName:
+		return "LNAME"
+	case tLValue:
+		return "LVALUE"
+	case tEqual:
+		return "EQUAL"
+	case tComma:
+		return "COMMA"
+	case tTimestamp:
+		return "TIMESTAMP"
+	case tValue:
+		return "VALUE"
+	}
+	return fmt.Sprintf("<invalid: %d>", t)
+}
+
+// buf returns the buffer of the current token.
+func (l *promlexer) buf() []byte {
+	return l.b[l.start:l.i]
+}
+
+func (l *promlexer) cur() byte {
+	return l.b[l.i]
+}
+
+// next advances the promlexer to the next character.
+func (l *promlexer) next() byte {
+	l.i++
+	if l.i >= len(l.b) {
+		l.err = io.EOF
+		return byte(tEOF)
+	}
+	// Lex struggles with null bytes. If we are in a label value or help string, where
+	// they are allowed, consume them here immediately.
+	for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
+		l.i++
+	}
+	return l.b[l.i]
+}
+
+func (l *promlexer) Error(es string) {
+	l.err = errors.New(es)
+}
+
+// PromParser parses samples from a byte slice of samples in the official
+// Prometheus text exposition format.
+type PromParser struct {
+	l       *promlexer
+	series  []byte
+	text    []byte
+	mtype   MetricType
+	val     float64
+	ts      int64
+	hasTS   bool
+	start   int
+	offsets []int
+}
+
+// NewPromParser returns a new parser of the byte slice.
+func NewPromParser(b []byte) Parser {
+	return &PromParser{l: &promlexer{b: append(b, '\n')}}
+}
+
+// Series returns the bytes of the series, the timestamp if set, and the value
+// of the current sample.
+func (p *PromParser) Series() ([]byte, *int64, float64) {
+	if p.hasTS {
+		return p.series, &p.ts, p.val
+	}
+	return p.series, nil, p.val
+}
+
+// Help returns the metric name and help text in the current entry.
+// Must only be called after Next returned a help entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *PromParser) Help() ([]byte, []byte) {
+	m := p.l.b[p.offsets[0]:p.offsets[1]]
+
+	// Replacer causes allocations. Replace only when necessary.
+	if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
+		return m, []byte(helpReplacer.Replace(string(p.text)))
+	}
+	return m, p.text
+}
+
+// Type returns the metric name and type in the current entry.
+// Must only be called after Next returned a type entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *PromParser) Type() ([]byte, MetricType) {
+	return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
+}
+
+// Unit returns the metric name and unit in the current entry.
+// Must only be called after Next returned a unit entry.
+// The returned byte slices become invalid after the next call to Next.
+func (p *PromParser) Unit() ([]byte, []byte) {
+	// The Prometheus format does not have units.
+	return nil, nil
+}
+
+// Comment returns the text of the current comment.
+// Must only be called after Next returned a comment entry.
+// The returned byte slice becomes invalid after the next call to Next.
+func (p *PromParser) Comment() []byte {
+	return p.text
+}
+
+// Metric writes the labels of the current sample into the passed labels.
+// It returns the string from which the metric was parsed.
+func (p *PromParser) Metric(l *labels.Labels) string {
+	// Allocate the full immutable string immediately, so we just
+	// have to create references on it below.
+	s := string(p.series)
+
+	*l = append(*l, labels.Label{
+		Name:  labels.MetricName,
+		Value: s[:p.offsets[0]-p.start],
+	})
+
+	for i := 1; i < len(p.offsets); i += 4 {
+		a := p.offsets[i] - p.start
+		b := p.offsets[i+1] - p.start
+		c := p.offsets[i+2] - p.start
+		d := p.offsets[i+3] - p.start
+
+		// Replacer causes allocations. Replace only when necessary.
+		if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
+			*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
+			continue
+		}
+		*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
+	}
+
+	// Sort labels to maintain the sorted labels invariant.
+	sort.Sort(*l)
+
+	return s
+}
+
+// Exemplar writes the exemplar of the current sample into the passed
+// exemplar. It returns if an exemplar exists.
+func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool {
+	return false
+}
+
+// nextToken returns the next token from the promlexer. It skips over tabs
+// and spaces.
+func (p *PromParser) nextToken() token {
+	for {
+		if tok := p.l.Lex(); tok != tWhitespace {
+			return tok
+		}
+	}
+}
+
+func parseError(exp string, got token) error {
+	return errors.Errorf("%s, got %q", exp, got)
+}
+
+// Next advances the parser to the next sample. It returns false if no
+// more samples were read or an error occurred.
+func (p *PromParser) Next() (Entry, error) {
+	var err error
+
+	p.start = p.l.i
+	p.offsets = p.offsets[:0]
+
+	switch t := p.nextToken(); t {
+	case tEOF:
+		return EntryInvalid, io.EOF
+	case tLinebreak:
+		// Allow full blank lines.
+		return p.Next()
+
+	case tHelp, tType:
+		switch t := p.nextToken(); t {
+		case tMName:
+			p.offsets = append(p.offsets, p.l.start, p.l.i)
+		default:
+			return EntryInvalid, parseError("expected metric name after HELP", t)
+		}
+		switch t := p.nextToken(); t {
+		case tText:
+			if len(p.l.buf()) > 1 {
+				p.text = p.l.buf()[1:]
+			} else {
+				p.text = []byte{}
+			}
+		default:
+			return EntryInvalid, parseError("expected text in HELP", t)
+		}
+		switch t {
+		case tType:
+			switch s := yoloString(p.text); s {
+			case "counter":
+				p.mtype = MetricTypeCounter
+			case "gauge":
+				p.mtype = MetricTypeGauge
+			case "histogram":
+				p.mtype = MetricTypeHistogram
+			case "summary":
+				p.mtype = MetricTypeSummary
+			case "untyped":
+				p.mtype = MetricTypeUnknown
+			default:
+				return EntryInvalid, errors.Errorf("invalid metric type %q", s)
+			}
+		case tHelp:
+			if !utf8.Valid(p.text) {
+				return EntryInvalid, errors.Errorf("help text is not a valid utf8 string")
+			}
+		}
+		if t := p.nextToken(); t != tLinebreak {
+			return EntryInvalid, parseError("linebreak expected after metadata", t)
+		}
+		switch t {
+		case tHelp:
+			return EntryHelp, nil
+		case tType:
+			return EntryType, nil
+		}
+	case tComment:
+		p.text = p.l.buf()
+		if t := p.nextToken(); t != tLinebreak {
+			return EntryInvalid, parseError("linebreak expected after comment", t)
+		}
+		return EntryComment, nil
+
+	case tMName:
+		p.offsets = append(p.offsets, p.l.i)
+		p.series = p.l.b[p.start:p.l.i]
+
+		t2 := p.nextToken()
+		if t2 == tBraceOpen {
+			if err := p.parseLVals(); err != nil {
+				return EntryInvalid, err
+			}
+			p.series = p.l.b[p.start:p.l.i]
+			t2 = p.nextToken()
+		}
+		if t2 != tValue {
+			return EntryInvalid, parseError("expected value after metric", t)
+		}
+		if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
+			return EntryInvalid, err
+		}
+		// Ensure canonical NaN value.
+		if math.IsNaN(p.val) {
+			p.val = math.Float64frombits(value.NormalNaN)
+		}
+		p.hasTS = false
+		switch p.nextToken() {
+		case tLinebreak:
+			break
+		case tTimestamp:
+			p.hasTS = true
+			if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
+				return EntryInvalid, err
+			}
+			if t2 := p.nextToken(); t2 != tLinebreak {
+				return EntryInvalid, parseError("expected next entry after timestamp", t)
+			}
+		default:
+			return EntryInvalid, parseError("expected timestamp or new record", t)
+		}
+		return EntrySeries, nil
+
+	default:
+		err = errors.Errorf("%q is not a valid start token", t)
+	}
+	return EntryInvalid, err
+}
+
+func (p *PromParser) parseLVals() error {
+	t := p.nextToken()
+	for {
+		switch t {
+		case tBraceClose:
+			return nil
+		case tLName:
+		default:
+			return parseError("expected label name", t)
+		}
+		p.offsets = append(p.offsets, p.l.start, p.l.i)
+
+		if t := p.nextToken(); t != tEqual {
+			return parseError("expected equal", t)
+		}
+		if t := p.nextToken(); t != tLValue {
+			return parseError("expected label value", t)
+		}
+		if !utf8.Valid(p.l.buf()) {
+			return errors.Errorf("invalid UTF-8 label value")
+		}
+
+		// The promlexer ensures the value string is quoted. Strip first
+		// and last character.
+		p.offsets = append(p.offsets, p.l.start+1, p.l.i-1)
+
+		// Free trailing commas are allowed.
+		if t = p.nextToken(); t == tComma {
+			t = p.nextToken()
+		}
+	}
+}
+
+var lvalReplacer = strings.NewReplacer(
+	`\"`, "\"",
+	`\\`, "\\",
+	`\n`, "\n",
+)
+
+var helpReplacer = strings.NewReplacer(
+	`\\`, "\\",
+	`\n`, "\n",
+)
+
+func yoloString(b []byte) string {
+	return *((*string)(unsafe.Pointer(&b)))
+}
+
+func parseFloat(s string) (float64, error) {
+	// Keep to pre-Go 1.13 float formats.
+	if strings.ContainsAny(s, "pP_") {
+		return 0, fmt.Errorf("unsupported character in float")
+	}
+	return strconv.ParseFloat(s, 64)
+}
diff -pruN 2.31.2+ds1-1/model/textparse/promparse_test.go 2.33.5+ds1-2/model/textparse/promparse_test.go
--- 2.31.2+ds1-1/model/textparse/promparse_test.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/promparse_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,520 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package textparse
+
+import (
+	"bytes"
+	"compress/gzip"
+	"io"
+	"io/ioutil"
+	"os"
+	"testing"
+
+	"github.com/prometheus/common/expfmt"
+	"github.com/prometheus/common/model"
+	"github.com/stretchr/testify/require"
+
+	"github.com/prometheus/prometheus/model/labels"
+)
+
+func TestPromParse(t *testing.T) {
+	input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# 	TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 4.9351e-05
+go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05
+go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
+go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05
+go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05
+# Hrandom comment starting with prefix of HELP
+#
+wind_speed{A="2",c="3"} 12345
+# comment with escaped \n newline
+# comment with escaped \ escape character
+# HELP nohelp1
+# HELP nohelp2 
+go_gc_duration_seconds{ quantile="1.0", a="b" } 8.3835e-05
+go_gc_duration_seconds { quantile="1.0", a="b" } 8.3835e-05
+go_gc_duration_seconds { quantile= "1.0", a= "b", } 8.3835e-05
+go_gc_duration_seconds { quantile = "1.0", a = "b" } 8.3835e-05
+go_gc_duration_seconds_count 99
+some:aggregate:rate5m{a_b="c"}	1
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 33  	123123
+_metric_starting_with_underscore 1
+testmetric{_label_starting_with_underscore="foo"} 1
+testmetric{label="\"bar\""} 1`
+	input += "\n# HELP metric foo\x00bar"
+	input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
+
+	int64p := func(x int64) *int64 { return &x }
+
+	exp := []struct {
+		lset    labels.Labels
+		m       string
+		t       *int64
+		v       float64
+		typ     MetricType
+		help    string
+		comment string
+	}{
+		{
+			m:    "go_gc_duration_seconds",
+			help: "A summary of the GC invocation durations.",
+		}, {
+			m:   "go_gc_duration_seconds",
+			typ: MetricTypeSummary,
+		}, {
+			m:    `go_gc_duration_seconds{quantile="0"}`,
+			v:    4.9351e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
+		}, {
+			m:    `go_gc_duration_seconds{quantile="0.25",}`,
+			v:    7.424100000000001e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
+		}, {
+			m:    `go_gc_duration_seconds{quantile="0.5",a="b"}`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
+		}, {
+			m:    `go_gc_duration_seconds{quantile="0.8", a="b"}`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.8", "a", "b"),
+		}, {
+			m:    `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
+		}, {
+			comment: "# Hrandom comment starting with prefix of HELP",
+		}, {
+			comment: "#",
+		}, {
+			m:    `wind_speed{A="2",c="3"}`,
+			v:    12345,
+			lset: labels.FromStrings("A", "2", "__name__", "wind_speed", "c", "3"),
+		}, {
+			comment: "# comment with escaped \\n newline",
+		}, {
+			comment: "# comment with escaped \\ escape character",
+		}, {
+			m:    "nohelp1",
+			help: "",
+		}, {
+			m:    "nohelp2",
+			help: "",
+		}, {
+			m:    `go_gc_duration_seconds{ quantile="1.0", a="b" }`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
+		}, {
+			m:    `go_gc_duration_seconds { quantile="1.0", a="b" }`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
+		}, {
+			m:    `go_gc_duration_seconds { quantile= "1.0", a= "b", }`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
+		}, {
+			m:    `go_gc_duration_seconds { quantile = "1.0", a = "b" }`,
+			v:    8.3835e-05,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
+		}, {
+			m:    `go_gc_duration_seconds_count`,
+			v:    99,
+			lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
+		}, {
+			m:    `some:aggregate:rate5m{a_b="c"}`,
+			v:    1,
+			lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
+		}, {
+			m:    "go_goroutines",
+			help: "Number of goroutines that currently exist.",
+		}, {
+			m:   "go_goroutines",
+			typ: MetricTypeGauge,
+		}, {
+			m:    `go_goroutines`,
+			v:    33,
+			t:    int64p(123123),
+			lset: labels.FromStrings("__name__", "go_goroutines"),
+		}, {
+			m:    "_metric_starting_with_underscore",
+			v:    1,
+			lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
+		}, {
+			m:    "testmetric{_label_starting_with_underscore=\"foo\"}",
+			v:    1,
+			lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
+		}, {
+			m:    "testmetric{label=\"\\\"bar\\\"\"}",
+			v:    1,
+			lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
+		}, {
+			m:    "metric",
+			help: "foo\x00bar",
+		}, {
+			m:    "null_byte_metric{a=\"abc\x00\"}",
+			v:    1,
+			lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
+		},
+	}
+
+	p := NewPromParser([]byte(input))
+	i := 0
+
+	var res labels.Labels
+
+	for {
+		et, err := p.Next()
+		if err == io.EOF {
+			break
+		}
+		require.NoError(t, err)
+
+		switch et {
+		case EntrySeries:
+			m, ts, v := p.Series()
+
+			p.Metric(&res)
+
+			require.Equal(t, exp[i].m, string(m))
+			require.Equal(t, exp[i].t, ts)
+			require.Equal(t, exp[i].v, v)
+			require.Equal(t, exp[i].lset, res)
+			res = res[:0]
+
+		case EntryType:
+			m, typ := p.Type()
+			require.Equal(t, exp[i].m, string(m))
+			require.Equal(t, exp[i].typ, typ)
+
+		case EntryHelp:
+			m, h := p.Help()
+			require.Equal(t, exp[i].m, string(m))
+			require.Equal(t, exp[i].help, string(h))
+
+		case EntryComment:
+			require.Equal(t, exp[i].comment, string(p.Comment()))
+		}
+
+		i++
+	}
+	require.Equal(t, len(exp), i)
+}
+
+func TestPromParseErrors(t *testing.T) {
+	cases := []struct {
+		input string
+		err   string
+	}{
+		{
+			input: "a",
+			err:   "expected value after metric, got \"MNAME\"",
+		},
+		{
+			input: "a{b='c'} 1\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "a{b=\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "a{\xff=\"foo\"} 1\n",
+			err:   "expected label name, got \"INVALID\"",
+		},
+		{
+			input: "a{b=\"\xff\"} 1\n",
+			err:   "invalid UTF-8 label value",
+		},
+		{
+			input: "a true\n",
+			err:   "strconv.ParseFloat: parsing \"true\": invalid syntax",
+		},
+		{
+			input: "something_weird{problem=\"",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "empty_label_name{=\"\"} 0",
+			err:   "expected label name, got \"EQUAL\"",
+		},
+		{
+			input: "foo 1_2\n",
+			err:   "unsupported character in float",
+		},
+		{
+			input: "foo 0x1p-3\n",
+			err:   "unsupported character in float",
+		},
+		{
+			input: "foo 0x1P-3\n",
+			err:   "unsupported character in float",
+		},
+		{
+			input: "foo 0 1_2\n",
+			err:   "expected next entry after timestamp, got \"MNAME\"",
+		},
+		{
+			input: `{a="ok"} 1`,
+			err:   `"INVALID" is not a valid start token`,
+		},
+	}
+
+	for i, c := range cases {
+		p := NewPromParser([]byte(c.input))
+		var err error
+		for err == nil {
+			_, err = p.Next()
+		}
+		require.Error(t, err)
+		require.Equal(t, c.err, err.Error(), "test %d", i)
+	}
+}
+
+func TestPromNullByteHandling(t *testing.T) {
+	cases := []struct {
+		input string
+		err   string
+	}{
+		{
+			input: "null_byte_metric{a=\"abc\x00\"} 1",
+			err:   "",
+		},
+		{
+			input: "a{b=\"\x00ss\"} 1\n",
+			err:   "",
+		},
+		{
+			input: "a{b=\"\x00\"} 1\n",
+			err:   "",
+		},
+		{
+			input: "a{b=\"\x00\"} 1\n",
+			err:   "",
+		},
+		{
+			input: "a{b=\x00\"ssss\"} 1\n",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "a{b=\"\x00",
+			err:   "expected label value, got \"INVALID\"",
+		},
+		{
+			input: "a{b\x00=\"hiih\"}	1",
+			err: "expected equal, got \"INVALID\"",
+		},
+		{
+			input: "a\x00{b=\"ddd\"} 1",
+			err:   "expected value after metric, got \"MNAME\"",
+		},
+	}
+
+	for i, c := range cases {
+		p := NewPromParser([]byte(c.input))
+		var err error
+		for err == nil {
+			_, err = p.Next()
+		}
+
+		if c.err == "" {
+			require.Equal(t, io.EOF, err, "test %d", i)
+			continue
+		}
+
+		require.Error(t, err)
+		require.Equal(t, c.err, err.Error(), "test %d", i)
+	}
+}
+
+const (
+	promtestdataSampleCount = 410
+)
+
+func BenchmarkParse(b *testing.B) {
+	for parserName, parser := range map[string]func([]byte) Parser{
+		"prometheus":  NewPromParser,
+		"openmetrics": NewOpenMetricsParser,
+	} {
+		for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
+			f, err := os.Open(fn)
+			require.NoError(b, err)
+			defer f.Close()
+
+			buf, err := ioutil.ReadAll(f)
+			require.NoError(b, err)
+
+			b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) {
+				total := 0
+
+				b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
+				b.ReportAllocs()
+				b.ResetTimer()
+
+				for i := 0; i < b.N; i += promtestdataSampleCount {
+					p := parser(buf)
+
+				Outer:
+					for i < b.N {
+						t, err := p.Next()
+						switch t {
+						case EntryInvalid:
+							if err == io.EOF {
+								break Outer
+							}
+							b.Fatal(err)
+						case EntrySeries:
+							m, _, _ := p.Series()
+							total += len(m)
+							i++
+						}
+					}
+				}
+				_ = total
+			})
+			b.Run(parserName+"/decode-metric/"+fn, func(b *testing.B) {
+				total := 0
+
+				b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
+				b.ReportAllocs()
+				b.ResetTimer()
+
+				for i := 0; i < b.N; i += promtestdataSampleCount {
+					p := parser(buf)
+
+				Outer:
+					for i < b.N {
+						t, err := p.Next()
+						switch t {
+						case EntryInvalid:
+							if err == io.EOF {
+								break Outer
+							}
+							b.Fatal(err)
+						case EntrySeries:
+							m, _, _ := p.Series()
+
+							res := make(labels.Labels, 0, 5)
+							p.Metric(&res)
+
+							total += len(m)
+							i++
+						}
+					}
+				}
+				_ = total
+			})
+			b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) {
+				total := 0
+				res := make(labels.Labels, 0, 5)
+
+				b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
+				b.ReportAllocs()
+				b.ResetTimer()
+
+				for i := 0; i < b.N; i += promtestdataSampleCount {
+					p := parser(buf)
+
+				Outer:
+					for i < b.N {
+						t, err := p.Next()
+						switch t {
+						case EntryInvalid:
+							if err == io.EOF {
+								break Outer
+							}
+							b.Fatal(err)
+						case EntrySeries:
+							m, _, _ := p.Series()
+
+							p.Metric(&res)
+
+							total += len(m)
+							i++
+							res = res[:0]
+						}
+					}
+				}
+				_ = total
+			})
+			b.Run("expfmt-text/"+fn, func(b *testing.B) {
+				b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
+				b.ReportAllocs()
+				b.ResetTimer()
+
+				total := 0
+
+				for i := 0; i < b.N; i += promtestdataSampleCount {
+					decSamples := make(model.Vector, 0, 50)
+					sdec := expfmt.SampleDecoder{
+						Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText),
+						Opts: &expfmt.DecodeOptions{
+							Timestamp: model.TimeFromUnixNano(0),
+						},
+					}
+
+					for {
+						if err = sdec.Decode(&decSamples); err != nil {
+							break
+						}
+						total += len(decSamples)
+						decSamples = decSamples[:0]
+					}
+				}
+				_ = total
+			})
+		}
+	}
+}
+
+func BenchmarkGzip(b *testing.B) {
+	for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
+		b.Run(fn, func(b *testing.B) {
+			f, err := os.Open(fn)
+			require.NoError(b, err)
+			defer f.Close()
+
+			var buf bytes.Buffer
+			gw := gzip.NewWriter(&buf)
+
+			n, err := io.Copy(gw, f)
+			require.NoError(b, err)
+			require.NoError(b, gw.Close())
+
+			gbuf, err := ioutil.ReadAll(&buf)
+			require.NoError(b, err)
+
+			k := b.N / promtestdataSampleCount
+
+			b.ReportAllocs()
+			b.SetBytes(int64(k) * int64(n))
+			b.ResetTimer()
+
+			total := 0
+
+			for i := 0; i < k; i++ {
+				gr, err := gzip.NewReader(bytes.NewReader(gbuf))
+				require.NoError(b, err)
+
+				d, err := ioutil.ReadAll(gr)
+				require.NoError(b, err)
+				require.NoError(b, gr.Close())
+
+				total += len(d)
+			}
+			_ = total
+		})
+	}
+}
diff -pruN 2.31.2+ds1-1/model/textparse/promtestdata.nometa.txt 2.33.5+ds1-2/model/textparse/promtestdata.nometa.txt
--- 2.31.2+ds1-1/model/textparse/promtestdata.nometa.txt	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/promtestdata.nometa.txt	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,411 @@
+go_gc_duration_seconds{quantile="0"} 4.9351e-05
+go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
+go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
+go_gc_duration_seconds{quantile="0.75"} 0.000106744
+go_gc_duration_seconds{quantile="1"} 0.002072195
+go_gc_duration_seconds_sum 0.012139815
+go_gc_duration_seconds_count 99
+go_goroutines 33
+go_memstats_alloc_bytes 1.7518624e+07
+go_memstats_alloc_bytes_total 8.3062296e+08
+go_memstats_buck_hash_sys_bytes 1.494637e+06
+go_memstats_frees_total 4.65658e+06
+go_memstats_gc_sys_bytes 1.107968e+06
+go_memstats_heap_alloc_bytes 1.7518624e+07
+go_memstats_heap_idle_bytes 6.668288e+06
+go_memstats_heap_inuse_bytes 1.8956288e+07
+go_memstats_heap_objects 72755
+go_memstats_heap_released_bytes_total 0
+go_memstats_heap_sys_bytes 2.5624576e+07
+go_memstats_last_gc_time_seconds 1.4843955586166437e+09
+go_memstats_lookups_total 2089
+go_memstats_mallocs_total 4.729335e+06
+go_memstats_mcache_inuse_bytes 9600
+go_memstats_mcache_sys_bytes 16384
+go_memstats_mspan_inuse_bytes 211520
+go_memstats_mspan_sys_bytes 245760
+go_memstats_next_gc_bytes 2.033527e+07
+go_memstats_other_sys_bytes 2.077323e+06
+go_memstats_stack_inuse_bytes 1.6384e+06
+go_memstats_stack_sys_bytes 1.6384e+06
+go_memstats_sys_bytes 3.2205048e+07
+http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="alerts"} 0
+http_request_duration_microseconds_count{handler="alerts"} 0
+http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="config"} 0
+http_request_duration_microseconds_count{handler="config"} 0
+http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="consoles"} 0
+http_request_duration_microseconds_count{handler="consoles"} 0
+http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="drop_series"} 0
+http_request_duration_microseconds_count{handler="drop_series"} 0
+http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="federate"} 0
+http_request_duration_microseconds_count{handler="federate"} 0
+http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="flags"} 0
+http_request_duration_microseconds_count{handler="flags"} 0
+http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
+http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
+http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
+http_request_duration_microseconds_sum{handler="graph"} 5803.93
+http_request_duration_microseconds_count{handler="graph"} 3
+http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="heap"} 0
+http_request_duration_microseconds_count{handler="heap"} 0
+http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
+http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
+http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
+http_request_duration_microseconds_sum{handler="label_values"} 3995.574
+http_request_duration_microseconds_count{handler="label_values"} 3
+http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="options"} 0
+http_request_duration_microseconds_count{handler="options"} 0
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
+http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
+http_request_duration_microseconds_count{handler="prometheus"} 462
+http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
+http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
+http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
+http_request_duration_microseconds_sum{handler="query"} 26074.11
+http_request_duration_microseconds_count{handler="query"} 6
+http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="query_range"} 0
+http_request_duration_microseconds_count{handler="query_range"} 0
+http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="rules"} 0
+http_request_duration_microseconds_count{handler="rules"} 0
+http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="series"} 0
+http_request_duration_microseconds_count{handler="series"} 0
+http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
+http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
+http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
+http_request_duration_microseconds_sum{handler="static"} 6458.621
+http_request_duration_microseconds_count{handler="static"} 3
+http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="status"} 0
+http_request_duration_microseconds_count{handler="status"} 0
+http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="targets"} 0
+http_request_duration_microseconds_count{handler="targets"} 0
+http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="version"} 0
+http_request_duration_microseconds_count{handler="version"} 0
+http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
+http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
+http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="alerts"} 0
+http_request_size_bytes_count{handler="alerts"} 0
+http_request_size_bytes{handler="config",quantile="0.5"} NaN
+http_request_size_bytes{handler="config",quantile="0.9"} NaN
+http_request_size_bytes{handler="config",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="config"} 0
+http_request_size_bytes_count{handler="config"} 0
+http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
+http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
+http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="consoles"} 0
+http_request_size_bytes_count{handler="consoles"} 0
+http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
+http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
+http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="drop_series"} 0
+http_request_size_bytes_count{handler="drop_series"} 0
+http_request_size_bytes{handler="federate",quantile="0.5"} NaN
+http_request_size_bytes{handler="federate",quantile="0.9"} NaN
+http_request_size_bytes{handler="federate",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="federate"} 0
+http_request_size_bytes_count{handler="federate"} 0
+http_request_size_bytes{handler="flags",quantile="0.5"} NaN
+http_request_size_bytes{handler="flags",quantile="0.9"} NaN
+http_request_size_bytes{handler="flags",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="flags"} 0
+http_request_size_bytes_count{handler="flags"} 0
+http_request_size_bytes{handler="graph",quantile="0.5"} 367
+http_request_size_bytes{handler="graph",quantile="0.9"} 389
+http_request_size_bytes{handler="graph",quantile="0.99"} 389
+http_request_size_bytes_sum{handler="graph"} 1145
+http_request_size_bytes_count{handler="graph"} 3
+http_request_size_bytes{handler="heap",quantile="0.5"} NaN
+http_request_size_bytes{handler="heap",quantile="0.9"} NaN
+http_request_size_bytes{handler="heap",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="heap"} 0
+http_request_size_bytes_count{handler="heap"} 0
+http_request_size_bytes{handler="label_values",quantile="0.5"} 416
+http_request_size_bytes{handler="label_values",quantile="0.9"} 416
+http_request_size_bytes{handler="label_values",quantile="0.99"} 416
+http_request_size_bytes_sum{handler="label_values"} 1248
+http_request_size_bytes_count{handler="label_values"} 3
+http_request_size_bytes{handler="options",quantile="0.5"} NaN
+http_request_size_bytes{handler="options",quantile="0.9"} NaN
+http_request_size_bytes{handler="options",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="options"} 0
+http_request_size_bytes_count{handler="options"} 0
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
+http_request_size_bytes_sum{handler="prometheus"} 109956
+http_request_size_bytes_count{handler="prometheus"} 462
+http_request_size_bytes{handler="query",quantile="0.5"} 531
+http_request_size_bytes{handler="query",quantile="0.9"} 531
+http_request_size_bytes{handler="query",quantile="0.99"} 531
+http_request_size_bytes_sum{handler="query"} 3186
+http_request_size_bytes_count{handler="query"} 6
+http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
+http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
+http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="query_range"} 0
+http_request_size_bytes_count{handler="query_range"} 0
+http_request_size_bytes{handler="rules",quantile="0.5"} NaN
+http_request_size_bytes{handler="rules",quantile="0.9"} NaN
+http_request_size_bytes{handler="rules",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="rules"} 0
+http_request_size_bytes_count{handler="rules"} 0
+http_request_size_bytes{handler="series",quantile="0.5"} NaN
+http_request_size_bytes{handler="series",quantile="0.9"} NaN
+http_request_size_bytes{handler="series",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="series"} 0
+http_request_size_bytes_count{handler="series"} 0
+http_request_size_bytes{handler="static",quantile="0.5"} 379
+http_request_size_bytes{handler="static",quantile="0.9"} 379
+http_request_size_bytes{handler="static",quantile="0.99"} 379
+http_request_size_bytes_sum{handler="static"} 1137
+http_request_size_bytes_count{handler="static"} 3
+http_request_size_bytes{handler="status",quantile="0.5"} NaN
+http_request_size_bytes{handler="status",quantile="0.9"} NaN
+http_request_size_bytes{handler="status",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="status"} 0
+http_request_size_bytes_count{handler="status"} 0
+http_request_size_bytes{handler="targets",quantile="0.5"} NaN
+http_request_size_bytes{handler="targets",quantile="0.9"} NaN
+http_request_size_bytes{handler="targets",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="targets"} 0
+http_request_size_bytes_count{handler="targets"} 0
+http_request_size_bytes{handler="version",quantile="0.5"} NaN
+http_request_size_bytes{handler="version",quantile="0.9"} NaN
+http_request_size_bytes{handler="version",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="version"} 0
+http_request_size_bytes_count{handler="version"} 0
+http_requests_total{code="200",handler="graph",method="get"} 3
+http_requests_total{code="200",handler="label_values",method="get"} 3
+http_requests_total{code="200",handler="prometheus",method="get"} 462
+http_requests_total{code="200",handler="query",method="get"} 6
+http_requests_total{code="200",handler="static",method="get"} 3
+http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
+http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
+http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="alerts"} 0
+http_response_size_bytes_count{handler="alerts"} 0
+http_response_size_bytes{handler="config",quantile="0.5"} NaN
+http_response_size_bytes{handler="config",quantile="0.9"} NaN
+http_response_size_bytes{handler="config",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="config"} 0
+http_response_size_bytes_count{handler="config"} 0
+http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
+http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
+http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="consoles"} 0
+http_response_size_bytes_count{handler="consoles"} 0
+http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
+http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
+http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="drop_series"} 0
+http_response_size_bytes_count{handler="drop_series"} 0
+http_response_size_bytes{handler="federate",quantile="0.5"} NaN
+http_response_size_bytes{handler="federate",quantile="0.9"} NaN
+http_response_size_bytes{handler="federate",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="federate"} 0
+http_response_size_bytes_count{handler="federate"} 0
+http_response_size_bytes{handler="flags",quantile="0.5"} NaN
+http_response_size_bytes{handler="flags",quantile="0.9"} NaN
+http_response_size_bytes{handler="flags",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="flags"} 0
+http_response_size_bytes_count{handler="flags"} 0
+http_response_size_bytes{handler="graph",quantile="0.5"} 3619
+http_response_size_bytes{handler="graph",quantile="0.9"} 3619
+http_response_size_bytes{handler="graph",quantile="0.99"} 3619
+http_response_size_bytes_sum{handler="graph"} 10857
+http_response_size_bytes_count{handler="graph"} 3
+http_response_size_bytes{handler="heap",quantile="0.5"} NaN
+http_response_size_bytes{handler="heap",quantile="0.9"} NaN
+http_response_size_bytes{handler="heap",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="heap"} 0
+http_response_size_bytes_count{handler="heap"} 0
+http_response_size_bytes{handler="label_values",quantile="0.5"} 642
+http_response_size_bytes{handler="label_values",quantile="0.9"} 642
+http_response_size_bytes{handler="label_values",quantile="0.99"} 642
+http_response_size_bytes_sum{handler="label_values"} 1926
+http_response_size_bytes_count{handler="label_values"} 3
+http_response_size_bytes{handler="options",quantile="0.5"} NaN
+http_response_size_bytes{handler="options",quantile="0.9"} NaN
+http_response_size_bytes{handler="options",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="options"} 0
+http_response_size_bytes_count{handler="options"} 0
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
+http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
+http_response_size_bytes_count{handler="prometheus"} 462
+http_response_size_bytes{handler="query",quantile="0.5"} 776
+http_response_size_bytes{handler="query",quantile="0.9"} 781
+http_response_size_bytes{handler="query",quantile="0.99"} 781
+http_response_size_bytes_sum{handler="query"} 4656
+http_response_size_bytes_count{handler="query"} 6
+http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
+http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
+http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="query_range"} 0
+http_response_size_bytes_count{handler="query_range"} 0
+http_response_size_bytes{handler="rules",quantile="0.5"} NaN
+http_response_size_bytes{handler="rules",quantile="0.9"} NaN
+http_response_size_bytes{handler="rules",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="rules"} 0
+http_response_size_bytes_count{handler="rules"} 0
+http_response_size_bytes{handler="series",quantile="0.5"} NaN
+http_response_size_bytes{handler="series",quantile="0.9"} NaN
+http_response_size_bytes{handler="series",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="series"} 0
+http_response_size_bytes_count{handler="series"} 0
+http_response_size_bytes{handler="static",quantile="0.5"} 6316
+http_response_size_bytes{handler="static",quantile="0.9"} 6316
+http_response_size_bytes{handler="static",quantile="0.99"} 6316
+http_response_size_bytes_sum{handler="static"} 18948
+http_response_size_bytes_count{handler="static"} 3
+http_response_size_bytes{handler="status",quantile="0.5"} NaN
+http_response_size_bytes{handler="status",quantile="0.9"} NaN
+http_response_size_bytes{handler="status",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="status"} 0
+http_response_size_bytes_count{handler="status"} 0
+http_response_size_bytes{handler="targets",quantile="0.5"} NaN
+http_response_size_bytes{handler="targets",quantile="0.9"} NaN
+http_response_size_bytes{handler="targets",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="targets"} 0
+http_response_size_bytes_count{handler="targets"} 0
+http_response_size_bytes{handler="version",quantile="0.5"} NaN
+http_response_size_bytes{handler="version",quantile="0.9"} NaN
+http_response_size_bytes{handler="version",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="version"} 0
+http_response_size_bytes_count{handler="version"} 0
+prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
+prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
+prometheus_config_last_reload_successful 1
+prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds_count 1
+prometheus_evaluator_iterations_skipped_total 0
+prometheus_notifications_dropped_total 0
+prometheus_notifications_queue_capacity 10000
+prometheus_notifications_queue_length 0
+prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
+prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_azure_refresh_duration_seconds_sum 0
+prometheus_sd_azure_refresh_duration_seconds_count 0
+prometheus_sd_azure_refresh_failures_total 0
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
+prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
+prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_failures_total 0
+prometheus_sd_dns_lookup_failures_total 0
+prometheus_sd_dns_lookups_total 0
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_ec2_refresh_duration_seconds_sum 0
+prometheus_sd_ec2_refresh_duration_seconds_count 0
+prometheus_sd_ec2_refresh_failures_total 0
+prometheus_sd_file_read_errors_total 0
+prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_file_scan_duration_seconds_sum 0
+prometheus_sd_file_scan_duration_seconds_count 0
+prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
+prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
+prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
+prometheus_sd_gce_refresh_duration_sum 0
+prometheus_sd_gce_refresh_duration_count 0
+prometheus_sd_gce_refresh_failures_total 0
+prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_marathon_refresh_duration_seconds_sum 0
+prometheus_sd_marathon_refresh_duration_seconds_count 0
+prometheus_sd_marathon_refresh_failures_total 0
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
+prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
+prometheus_target_interval_length_seconds_count{interval="50ms"} 685
+prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
+prometheus_target_skipped_scrapes_total 0
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
+prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
+prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
+prometheus_treecache_watcher_goroutines 0
+prometheus_treecache_zookeeper_failures_total 0
+# EOF
diff -pruN 2.31.2+ds1-1/model/textparse/promtestdata.txt 2.33.5+ds1-2/model/textparse/promtestdata.txt
--- 2.31.2+ds1-1/model/textparse/promtestdata.txt	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/promtestdata.txt	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,529 @@
+# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 4.9351e-05
+go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
+go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
+go_gc_duration_seconds{quantile="0.75"} 0.000106744
+go_gc_duration_seconds{quantile="1"} 0.002072195
+go_gc_duration_seconds_sum 0.012139815
+go_gc_duration_seconds_count 99
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 33
+# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
+# TYPE go_memstats_alloc_bytes gauge
+go_memstats_alloc_bytes 1.7518624e+07
+# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
+# TYPE go_memstats_alloc_bytes_total counter
+go_memstats_alloc_bytes_total 8.3062296e+08
+# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
+# TYPE go_memstats_buck_hash_sys_bytes gauge
+go_memstats_buck_hash_sys_bytes 1.494637e+06
+# HELP go_memstats_frees_total Total number of frees.
+# TYPE go_memstats_frees_total counter
+go_memstats_frees_total 4.65658e+06
+# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
+# TYPE go_memstats_gc_sys_bytes gauge
+go_memstats_gc_sys_bytes 1.107968e+06
+# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
+# TYPE go_memstats_heap_alloc_bytes gauge
+go_memstats_heap_alloc_bytes 1.7518624e+07
+# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
+# TYPE go_memstats_heap_idle_bytes gauge
+go_memstats_heap_idle_bytes 6.668288e+06
+# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
+# TYPE go_memstats_heap_inuse_bytes gauge
+go_memstats_heap_inuse_bytes 1.8956288e+07
+# HELP go_memstats_heap_objects Number of allocated objects.
+# TYPE go_memstats_heap_objects gauge
+go_memstats_heap_objects 72755
+# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
+# TYPE go_memstats_heap_released_bytes_total counter
+go_memstats_heap_released_bytes_total 0
+# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
+# TYPE go_memstats_heap_sys_bytes gauge
+go_memstats_heap_sys_bytes 2.5624576e+07
+# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
+# TYPE go_memstats_last_gc_time_seconds gauge
+go_memstats_last_gc_time_seconds 1.4843955586166437e+09
+# HELP go_memstats_lookups_total Total number of pointer lookups.
+# TYPE go_memstats_lookups_total counter
+go_memstats_lookups_total 2089
+# HELP go_memstats_mallocs_total Total number of mallocs.
+# TYPE go_memstats_mallocs_total counter
+go_memstats_mallocs_total 4.729335e+06
+# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
+# TYPE go_memstats_mcache_inuse_bytes gauge
+go_memstats_mcache_inuse_bytes 9600
+# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
+# TYPE go_memstats_mcache_sys_bytes gauge
+go_memstats_mcache_sys_bytes 16384
+# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
+# TYPE go_memstats_mspan_inuse_bytes gauge
+go_memstats_mspan_inuse_bytes 211520
+# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
+# TYPE go_memstats_mspan_sys_bytes gauge
+go_memstats_mspan_sys_bytes 245760
+# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
+# TYPE go_memstats_next_gc_bytes gauge
+go_memstats_next_gc_bytes 2.033527e+07
+# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
+# TYPE go_memstats_other_sys_bytes gauge
+go_memstats_other_sys_bytes 2.077323e+06
+# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
+# TYPE go_memstats_stack_inuse_bytes gauge
+go_memstats_stack_inuse_bytes 1.6384e+06
+# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
+# TYPE go_memstats_stack_sys_bytes gauge
+go_memstats_stack_sys_bytes 1.6384e+06
+# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
+# TYPE go_memstats_sys_bytes gauge
+go_memstats_sys_bytes 3.2205048e+07
+# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
+# TYPE http_request_duration_microseconds summary
+http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="alerts"} 0
+http_request_duration_microseconds_count{handler="alerts"} 0
+http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="config"} 0
+http_request_duration_microseconds_count{handler="config"} 0
+http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="consoles"} 0
+http_request_duration_microseconds_count{handler="consoles"} 0
+http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="drop_series"} 0
+http_request_duration_microseconds_count{handler="drop_series"} 0
+http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="federate"} 0
+http_request_duration_microseconds_count{handler="federate"} 0
+http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="flags"} 0
+http_request_duration_microseconds_count{handler="flags"} 0
+http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
+http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
+http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
+http_request_duration_microseconds_sum{handler="graph"} 5803.93
+http_request_duration_microseconds_count{handler="graph"} 3
+http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="heap"} 0
+http_request_duration_microseconds_count{handler="heap"} 0
+http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
+http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
+http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
+http_request_duration_microseconds_sum{handler="label_values"} 3995.574
+http_request_duration_microseconds_count{handler="label_values"} 3
+http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="options"} 0
+http_request_duration_microseconds_count{handler="options"} 0
+http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
+http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
+http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
+http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
+http_request_duration_microseconds_count{handler="prometheus"} 462
+http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
+http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
+http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
+http_request_duration_microseconds_sum{handler="query"} 26074.11
+http_request_duration_microseconds_count{handler="query"} 6
+http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="query_range"} 0
+http_request_duration_microseconds_count{handler="query_range"} 0
+http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="rules"} 0
+http_request_duration_microseconds_count{handler="rules"} 0
+http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="series"} 0
+http_request_duration_microseconds_count{handler="series"} 0
+http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
+http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
+http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
+http_request_duration_microseconds_sum{handler="static"} 6458.621
+http_request_duration_microseconds_count{handler="static"} 3
+http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="status"} 0
+http_request_duration_microseconds_count{handler="status"} 0
+http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="targets"} 0
+http_request_duration_microseconds_count{handler="targets"} 0
+http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
+http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
+http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
+http_request_duration_microseconds_sum{handler="version"} 0
+http_request_duration_microseconds_count{handler="version"} 0
+# HELP http_request_size_bytes The HTTP request sizes in bytes.
+# TYPE http_request_size_bytes summary
+http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
+http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
+http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="alerts"} 0
+http_request_size_bytes_count{handler="alerts"} 0
+http_request_size_bytes{handler="config",quantile="0.5"} NaN
+http_request_size_bytes{handler="config",quantile="0.9"} NaN
+http_request_size_bytes{handler="config",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="config"} 0
+http_request_size_bytes_count{handler="config"} 0
+http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
+http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
+http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="consoles"} 0
+http_request_size_bytes_count{handler="consoles"} 0
+http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
+http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
+http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="drop_series"} 0
+http_request_size_bytes_count{handler="drop_series"} 0
+http_request_size_bytes{handler="federate",quantile="0.5"} NaN
+http_request_size_bytes{handler="federate",quantile="0.9"} NaN
+http_request_size_bytes{handler="federate",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="federate"} 0
+http_request_size_bytes_count{handler="federate"} 0
+http_request_size_bytes{handler="flags",quantile="0.5"} NaN
+http_request_size_bytes{handler="flags",quantile="0.9"} NaN
+http_request_size_bytes{handler="flags",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="flags"} 0
+http_request_size_bytes_count{handler="flags"} 0
+http_request_size_bytes{handler="graph",quantile="0.5"} 367
+http_request_size_bytes{handler="graph",quantile="0.9"} 389
+http_request_size_bytes{handler="graph",quantile="0.99"} 389
+http_request_size_bytes_sum{handler="graph"} 1145
+http_request_size_bytes_count{handler="graph"} 3
+http_request_size_bytes{handler="heap",quantile="0.5"} NaN
+http_request_size_bytes{handler="heap",quantile="0.9"} NaN
+http_request_size_bytes{handler="heap",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="heap"} 0
+http_request_size_bytes_count{handler="heap"} 0
+http_request_size_bytes{handler="label_values",quantile="0.5"} 416
+http_request_size_bytes{handler="label_values",quantile="0.9"} 416
+http_request_size_bytes{handler="label_values",quantile="0.99"} 416
+http_request_size_bytes_sum{handler="label_values"} 1248
+http_request_size_bytes_count{handler="label_values"} 3
+http_request_size_bytes{handler="options",quantile="0.5"} NaN
+http_request_size_bytes{handler="options",quantile="0.9"} NaN
+http_request_size_bytes{handler="options",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="options"} 0
+http_request_size_bytes_count{handler="options"} 0
+http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
+http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
+http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
+http_request_size_bytes_sum{handler="prometheus"} 109956
+http_request_size_bytes_count{handler="prometheus"} 462
+http_request_size_bytes{handler="query",quantile="0.5"} 531
+http_request_size_bytes{handler="query",quantile="0.9"} 531
+http_request_size_bytes{handler="query",quantile="0.99"} 531
+http_request_size_bytes_sum{handler="query"} 3186
+http_request_size_bytes_count{handler="query"} 6
+http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
+http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
+http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="query_range"} 0
+http_request_size_bytes_count{handler="query_range"} 0
+http_request_size_bytes{handler="rules",quantile="0.5"} NaN
+http_request_size_bytes{handler="rules",quantile="0.9"} NaN
+http_request_size_bytes{handler="rules",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="rules"} 0
+http_request_size_bytes_count{handler="rules"} 0
+http_request_size_bytes{handler="series",quantile="0.5"} NaN
+http_request_size_bytes{handler="series",quantile="0.9"} NaN
+http_request_size_bytes{handler="series",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="series"} 0
+http_request_size_bytes_count{handler="series"} 0
+http_request_size_bytes{handler="static",quantile="0.5"} 379
+http_request_size_bytes{handler="static",quantile="0.9"} 379
+http_request_size_bytes{handler="static",quantile="0.99"} 379
+http_request_size_bytes_sum{handler="static"} 1137
+http_request_size_bytes_count{handler="static"} 3
+http_request_size_bytes{handler="status",quantile="0.5"} NaN
+http_request_size_bytes{handler="status",quantile="0.9"} NaN
+http_request_size_bytes{handler="status",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="status"} 0
+http_request_size_bytes_count{handler="status"} 0
+http_request_size_bytes{handler="targets",quantile="0.5"} NaN
+http_request_size_bytes{handler="targets",quantile="0.9"} NaN
+http_request_size_bytes{handler="targets",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="targets"} 0
+http_request_size_bytes_count{handler="targets"} 0
+http_request_size_bytes{handler="version",quantile="0.5"} NaN
+http_request_size_bytes{handler="version",quantile="0.9"} NaN
+http_request_size_bytes{handler="version",quantile="0.99"} NaN
+http_request_size_bytes_sum{handler="version"} 0
+http_request_size_bytes_count{handler="version"} 0
+# HELP http_requests_total Total number of HTTP requests made.
+# TYPE http_requests_total counter
+http_requests_total{code="200",handler="graph",method="get"} 3
+http_requests_total{code="200",handler="label_values",method="get"} 3
+http_requests_total{code="200",handler="prometheus",method="get"} 462
+http_requests_total{code="200",handler="query",method="get"} 6
+http_requests_total{code="200",handler="static",method="get"} 3
+# HELP http_response_size_bytes The HTTP response sizes in bytes.
+# TYPE http_response_size_bytes summary
+http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
+http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
+http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="alerts"} 0
+http_response_size_bytes_count{handler="alerts"} 0
+http_response_size_bytes{handler="config",quantile="0.5"} NaN
+http_response_size_bytes{handler="config",quantile="0.9"} NaN
+http_response_size_bytes{handler="config",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="config"} 0
+http_response_size_bytes_count{handler="config"} 0
+http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
+http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
+http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="consoles"} 0
+http_response_size_bytes_count{handler="consoles"} 0
+http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
+http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
+http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="drop_series"} 0
+http_response_size_bytes_count{handler="drop_series"} 0
+http_response_size_bytes{handler="federate",quantile="0.5"} NaN
+http_response_size_bytes{handler="federate",quantile="0.9"} NaN
+http_response_size_bytes{handler="federate",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="federate"} 0
+http_response_size_bytes_count{handler="federate"} 0
+http_response_size_bytes{handler="flags",quantile="0.5"} NaN
+http_response_size_bytes{handler="flags",quantile="0.9"} NaN
+http_response_size_bytes{handler="flags",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="flags"} 0
+http_response_size_bytes_count{handler="flags"} 0
+http_response_size_bytes{handler="graph",quantile="0.5"} 3619
+http_response_size_bytes{handler="graph",quantile="0.9"} 3619
+http_response_size_bytes{handler="graph",quantile="0.99"} 3619
+http_response_size_bytes_sum{handler="graph"} 10857
+http_response_size_bytes_count{handler="graph"} 3
+http_response_size_bytes{handler="heap",quantile="0.5"} NaN
+http_response_size_bytes{handler="heap",quantile="0.9"} NaN
+http_response_size_bytes{handler="heap",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="heap"} 0
+http_response_size_bytes_count{handler="heap"} 0
+http_response_size_bytes{handler="label_values",quantile="0.5"} 642
+http_response_size_bytes{handler="label_values",quantile="0.9"} 642
+http_response_size_bytes{handler="label_values",quantile="0.99"} 642
+http_response_size_bytes_sum{handler="label_values"} 1926
+http_response_size_bytes_count{handler="label_values"} 3
+http_response_size_bytes{handler="options",quantile="0.5"} NaN
+http_response_size_bytes{handler="options",quantile="0.9"} NaN
+http_response_size_bytes{handler="options",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="options"} 0
+http_response_size_bytes_count{handler="options"} 0
+http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
+http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
+http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
+http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
+http_response_size_bytes_count{handler="prometheus"} 462
+http_response_size_bytes{handler="query",quantile="0.5"} 776
+http_response_size_bytes{handler="query",quantile="0.9"} 781
+http_response_size_bytes{handler="query",quantile="0.99"} 781
+http_response_size_bytes_sum{handler="query"} 4656
+http_response_size_bytes_count{handler="query"} 6
+http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
+http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
+http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="query_range"} 0
+http_response_size_bytes_count{handler="query_range"} 0
+http_response_size_bytes{handler="rules",quantile="0.5"} NaN
+http_response_size_bytes{handler="rules",quantile="0.9"} NaN
+http_response_size_bytes{handler="rules",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="rules"} 0
+http_response_size_bytes_count{handler="rules"} 0
+http_response_size_bytes{handler="series",quantile="0.5"} NaN
+http_response_size_bytes{handler="series",quantile="0.9"} NaN
+http_response_size_bytes{handler="series",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="series"} 0
+http_response_size_bytes_count{handler="series"} 0
+http_response_size_bytes{handler="static",quantile="0.5"} 6316
+http_response_size_bytes{handler="static",quantile="0.9"} 6316
+http_response_size_bytes{handler="static",quantile="0.99"} 6316
+http_response_size_bytes_sum{handler="static"} 18948
+http_response_size_bytes_count{handler="static"} 3
+http_response_size_bytes{handler="status",quantile="0.5"} NaN
+http_response_size_bytes{handler="status",quantile="0.9"} NaN
+http_response_size_bytes{handler="status",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="status"} 0
+http_response_size_bytes_count{handler="status"} 0
+http_response_size_bytes{handler="targets",quantile="0.5"} NaN
+http_response_size_bytes{handler="targets",quantile="0.9"} NaN
+http_response_size_bytes{handler="targets",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="targets"} 0
+http_response_size_bytes_count{handler="targets"} 0
+http_response_size_bytes{handler="version",quantile="0.5"} NaN
+http_response_size_bytes{handler="version",quantile="0.9"} NaN
+http_response_size_bytes{handler="version",quantile="0.99"} NaN
+http_response_size_bytes_sum{handler="version"} 0
+http_response_size_bytes_count{handler="version"} 0
+# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built.
+# TYPE prometheus_build_info gauge
+prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
+# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
+# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
+prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
+# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
+# TYPE prometheus_config_last_reload_successful gauge
+prometheus_config_last_reload_successful 1
+# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations.
+# TYPE prometheus_evaluator_duration_seconds summary
+prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
+prometheus_evaluator_duration_seconds_count 1
+# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage.
+# TYPE prometheus_evaluator_iterations_skipped_total counter
+prometheus_evaluator_iterations_skipped_total 0
+# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration.
+# TYPE prometheus_notifications_dropped_total counter
+prometheus_notifications_dropped_total 0
+# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
+# TYPE prometheus_notifications_queue_capacity gauge
+prometheus_notifications_queue_capacity 10000
+# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
+# TYPE prometheus_notifications_queue_length gauge
+prometheus_notifications_queue_length 0
+# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
+# TYPE prometheus_rule_evaluation_failures_total counter
+prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
+prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
+# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds.
+# TYPE prometheus_sd_azure_refresh_duration_seconds summary
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_azure_refresh_duration_seconds_sum 0
+prometheus_sd_azure_refresh_duration_seconds_count 0
+# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures.
+# TYPE prometheus_sd_azure_refresh_failures_total counter
+prometheus_sd_azure_refresh_failures_total 0
+# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
+# TYPE prometheus_sd_consul_rpc_duration_seconds summary
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
+prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
+prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
+prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
+prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
+# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
+# TYPE prometheus_sd_consul_rpc_failures_total counter
+prometheus_sd_consul_rpc_failures_total 0
+# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
+# TYPE prometheus_sd_dns_lookup_failures_total counter
+prometheus_sd_dns_lookup_failures_total 0
+# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
+# TYPE prometheus_sd_dns_lookups_total counter
+prometheus_sd_dns_lookups_total 0
+# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds.
+# TYPE prometheus_sd_ec2_refresh_duration_seconds summary
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_ec2_refresh_duration_seconds_sum 0
+prometheus_sd_ec2_refresh_duration_seconds_count 0
+# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures.
+# TYPE prometheus_sd_ec2_refresh_failures_total counter
+prometheus_sd_ec2_refresh_failures_total 0
+# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
+# TYPE prometheus_sd_file_read_errors_total counter
+prometheus_sd_file_read_errors_total 0
+# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
+# TYPE prometheus_sd_file_scan_duration_seconds summary
+prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_file_scan_duration_seconds_sum 0
+prometheus_sd_file_scan_duration_seconds_count 0
+# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds.
+# TYPE prometheus_sd_gce_refresh_duration summary
+prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
+prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
+prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
+prometheus_sd_gce_refresh_duration_sum 0
+prometheus_sd_gce_refresh_duration_count 0
+# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures.
+# TYPE prometheus_sd_gce_refresh_failures_total counter
+prometheus_sd_gce_refresh_failures_total 0
+# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
+# TYPE prometheus_sd_kubernetes_events_total counter
+prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
+prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
+# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds.
+# TYPE prometheus_sd_marathon_refresh_duration_seconds summary
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
+prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
+prometheus_sd_marathon_refresh_duration_seconds_sum 0
+prometheus_sd_marathon_refresh_duration_seconds_count 0
+# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures.
+# TYPE prometheus_sd_marathon_refresh_failures_total counter
+prometheus_sd_marathon_refresh_failures_total 0
+# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
+# TYPE prometheus_target_interval_length_seconds summary
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
+prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
+prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
+prometheus_target_interval_length_seconds_count{interval="50ms"} 685
+# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
+# TYPE prometheus_target_scrape_pool_sync_total counter
+prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
+# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled.
+# TYPE prometheus_target_skipped_scrapes_total counter
+prometheus_target_skipped_scrapes_total 0
+# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
+# TYPE prometheus_target_sync_length_seconds summary
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
+prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
+prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
+prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
+# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
+# TYPE prometheus_treecache_watcher_goroutines gauge
+prometheus_treecache_watcher_goroutines 0
+# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
+# TYPE prometheus_treecache_zookeeper_failures_total counter
+prometheus_treecache_zookeeper_failures_total 0
+# EOF
diff -pruN 2.31.2+ds1-1/model/textparse/README.md 2.33.5+ds1-2/model/textparse/README.md
--- 2.31.2+ds1-1/model/textparse/README.md	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/textparse/README.md	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,6 @@
+# Making changes to textparse lexers
+In the rare case that you need to update the textparse lexers, edit promlex.l or openmetricslex.l and then run the following command: 
+`golex -o=promlex.l.go promlex.l`
+
+Note that you need golex installed: 
+`go get -u modernc.org/golex`
\ No newline at end of file
diff -pruN 2.31.2+ds1-1/model/timestamp/timestamp.go 2.33.5+ds1-2/model/timestamp/timestamp.go
--- 2.31.2+ds1-1/model/timestamp/timestamp.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/timestamp/timestamp.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,34 @@
+// Copyright 2017 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package timestamp
+
+import (
+	"math"
+	"time"
+)
+
+// FromTime returns a new millisecond timestamp from a time.
+func FromTime(t time.Time) int64 {
+	return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond)
+}
+
+// Time returns a new time.Time object from a millisecond timestamp.
+func Time(ts int64) time.Time {
+	return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC()
+}
+
+// FromFloatSeconds returns a millisecond timestamp from float seconds.
+func FromFloatSeconds(ts float64) int64 {
+	return int64(math.Round(ts * 1000))
+}
diff -pruN 2.31.2+ds1-1/model/value/value.go 2.33.5+ds1-2/model/value/value.go
--- 2.31.2+ds1-1/model/value/value.go	1970-01-01 00:00:00.000000000 +0000
+++ 2.33.5+ds1-2/model/value/value.go	2022-03-08 16:34:32.000000000 +0000
@@ -0,0 +1,34 @@
+// Copyright 2016 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package value
+
+import (
+	"math"
+)
+
+const (
+	// NormalNaN is a quiet NaN. This is also math.NaN().
+	NormalNaN uint64 = 0x7ff8000000000001
+
+	// StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0.
+	// This value is chosen with many leading 0s, so we have scope to store more
+	// complicated values in the future. It is 2 rather than 1 to make
+	// it easier to distinguish from the NormalNaN by a human when debugging.
+	StaleNaN uint64 = 0x7ff0000000000002
+)
+
+// IsStaleNaN returns true when the provided NaN value is a stale marker.
+func IsStaleNaN(v float64) bool {
+	return math.Float64bits(v) == StaleNaN
+}
diff -pruN 2.31.2+ds1-1/NOTICE 2.33.5+ds1-2/NOTICE
--- 2.31.2+ds1-1/NOTICE	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/NOTICE	2022-03-08 16:34:32.000000000 +0000
@@ -91,6 +91,11 @@ https://github.com/dgryski/go-tsz
 Copyright (c) 2015,2016 Damian Gryski <damian@gryski.com>
 See https://github.com/dgryski/go-tsz/blob/master/LICENSE for license details.
 
+The Go programming language
+https://go.dev/
+Copyright (c) 2009 The Go Authors
+See https://go.dev/LICENSE for license details.
+
 The Codicon icon font from Microsoft
 https://github.com/microsoft/vscode-codicons
 Copyright (c) Microsoft Corporation and other contributors
diff -pruN 2.31.2+ds1-1/notifier/notifier.go 2.33.5+ds1-2/notifier/notifier.go
--- 2.31.2+ds1-1/notifier/notifier.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/notifier/notifier.go	2022-03-08 16:34:32.000000000 +0000
@@ -41,8 +41,8 @@ import (
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/relabel"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
 )
 
 const (
@@ -303,7 +303,6 @@ func (n *Manager) nextBatch() []*Alert {
 
 // Run dispatches notifications continuously.
 func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
-
 	for {
 		select {
 		case <-n.ctx.Done():
@@ -602,7 +601,7 @@ func (n *Manager) Stop() {
 	n.cancel()
 }
 
-// alertmanager holds Alertmanager endpoint information.
+// Alertmanager holds Alertmanager endpoint information.
 type alertmanager interface {
 	url() *url.URL
 }
@@ -654,7 +653,7 @@ func (s *alertmanagerSet) sync(tgs []*ta
 	allDroppedAms := []alertmanager{}
 
 	for _, tg := range tgs {
-		ams, droppedAms, err := alertmanagerFromGroup(tg, s.cfg)
+		ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg)
 		if err != nil {
 			level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err)
 			continue
@@ -691,9 +690,9 @@ func postPath(pre string, v config.Alert
 	return path.Join("/", pre, alertPushEndpoint)
 }
 
-// alertmanagerFromGroup extracts a list of alertmanagers from a target group
+// AlertmanagerFromGroup extracts a list of alertmanagers from a target group
 // and an associated AlertmanagerConfig.
-func alertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) {
+func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) {
 	var res []alertmanager
 	var droppedAlertManagers []alertmanager
 
diff -pruN 2.31.2+ds1-1/notifier/notifier_test.go 2.33.5+ds1-2/notifier/notifier_test.go
--- 2.31.2+ds1-1/notifier/notifier_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/notifier/notifier_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -35,12 +35,12 @@ import (
 
 	"github.com/prometheus/prometheus/config"
 	"github.com/prometheus/prometheus/discovery/targetgroup"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/relabel"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/relabel"
 )
 
 func TestPostPath(t *testing.T) {
-	var cases = []struct {
+	cases := []struct {
 		in, out string
 	}{
 		{
@@ -447,7 +447,7 @@ func (a alertmanagerMock) url() *url.URL
 
 func TestLabelSetNotReused(t *testing.T) {
 	tg := makeInputTargetGroup()
-	_, _, err := alertmanagerFromGroup(tg, &config.AlertmanagerConfig{})
+	_, _, err := AlertmanagerFromGroup(tg, &config.AlertmanagerConfig{})
 
 	require.NoError(t, err)
 
@@ -456,7 +456,7 @@ func TestLabelSetNotReused(t *testing.T)
 }
 
 func TestReload(t *testing.T) {
-	var tests = []struct {
+	tests := []struct {
 		in  *targetgroup.Group
 		out string
 	}{
@@ -500,11 +500,10 @@ alerting:
 
 		require.Equal(t, tt.out, res)
 	}
-
 }
 
 func TestDroppedAlertmanagers(t *testing.T) {
-	var tests = []struct {
+	tests := []struct {
 		in  *targetgroup.Group
 		out string
 	}{
diff -pruN 2.31.2+ds1-1/pkg/exemplar/exemplar.go 2.33.5+ds1-2/pkg/exemplar/exemplar.go
--- 2.31.2+ds1-1/pkg/exemplar/exemplar.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/exemplar/exemplar.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,50 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package exemplar
-
-import "github.com/prometheus/prometheus/pkg/labels"
-
-// The combined length of the label names and values of an Exemplar's LabelSet MUST NOT exceed 128 UTF-8 characters
-// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#exemplars
-const ExemplarMaxLabelSetLength = 128
-
-// Exemplar is additional information associated with a time series.
-type Exemplar struct {
-	Labels labels.Labels `json:"labels"`
-	Value  float64       `json:"value"`
-	Ts     int64         `json:"timestamp"`
-	HasTs  bool
-}
-
-type QueryResult struct {
-	SeriesLabels labels.Labels `json:"seriesLabels"`
-	Exemplars    []Exemplar    `json:"exemplars"`
-}
-
-// Equals compares if the exemplar e is the same as e2. Note that if HasTs is false for
-// both exemplars then the timestamps will be ignored for the comparison. This can come up
-// when an exemplar is exported without it's own timestamp, in which case the scrape timestamp
-// is assigned to the Ts field. However we still want to treat the same exemplar, scraped without
-// an exported timestamp, as a duplicate of itself for each subsequent scrape.
-func (e Exemplar) Equals(e2 Exemplar) bool {
-	if !labels.Equal(e.Labels, e2.Labels) {
-		return false
-	}
-
-	if (e.HasTs || e2.HasTs) && e.Ts != e2.Ts {
-		return false
-	}
-
-	return e.Value == e2.Value
-}
diff -pruN 2.31.2+ds1-1/pkg/gate/gate.go 2.33.5+ds1-2/pkg/gate/gate.go
--- 2.31.2+ds1-1/pkg/gate/gate.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/gate/gate.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package gate
-
-import "context"
-
-// A Gate controls the maximum number of concurrently running and waiting queries.
-type Gate struct {
-	ch chan struct{}
-}
-
-// New returns a query gate that limits the number of queries
-// being concurrently executed.
-func New(length int) *Gate {
-	return &Gate{
-		ch: make(chan struct{}, length),
-	}
-}
-
-// Start blocks until the gate has a free spot or the context is done.
-func (g *Gate) Start(ctx context.Context) error {
-	select {
-	case <-ctx.Done():
-		return ctx.Err()
-	case g.ch <- struct{}{}:
-		return nil
-	}
-}
-
-// Done releases a single spot in the gate.
-func (g *Gate) Done() {
-	select {
-	case <-g.ch:
-	default:
-		panic("gate.Done: more operations done than started")
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/labels/labels.go 2.33.5+ds1-2/pkg/labels/labels.go
--- 2.31.2+ds1-1/pkg/labels/labels.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/labels/labels.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,474 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package labels
-
-import (
-	"bytes"
-	"encoding/json"
-	"sort"
-	"strconv"
-
-	"github.com/cespare/xxhash/v2"
-)
-
-// Well-known label names used by Prometheus components.
-const (
-	MetricName   = "__name__"
-	AlertName    = "alertname"
-	BucketLabel  = "le"
-	InstanceName = "instance"
-
-	labelSep = '\xfe'
-)
-
-var seps = []byte{'\xff'}
-
-// Label is a key/value pair of strings.
-type Label struct {
-	Name, Value string
-}
-
-// Labels is a sorted set of labels. Order has to be guaranteed upon
-// instantiation.
-type Labels []Label
-
-func (ls Labels) Len() int           { return len(ls) }
-func (ls Labels) Swap(i, j int)      { ls[i], ls[j] = ls[j], ls[i] }
-func (ls Labels) Less(i, j int) bool { return ls[i].Name < ls[j].Name }
-
-func (ls Labels) String() string {
-	var b bytes.Buffer
-
-	b.WriteByte('{')
-	for i, l := range ls {
-		if i > 0 {
-			b.WriteByte(',')
-			b.WriteByte(' ')
-		}
-		b.WriteString(l.Name)
-		b.WriteByte('=')
-		b.WriteString(strconv.Quote(l.Value))
-	}
-	b.WriteByte('}')
-	return b.String()
-}
-
-// Bytes returns ls as a byte slice.
-// It uses an byte invalid character as a separator and so should not be used for printing.
-func (ls Labels) Bytes(buf []byte) []byte {
-	b := bytes.NewBuffer(buf[:0])
-	b.WriteByte(labelSep)
-	for i, l := range ls {
-		if i > 0 {
-			b.WriteByte(seps[0])
-		}
-		b.WriteString(l.Name)
-		b.WriteByte(seps[0])
-		b.WriteString(l.Value)
-	}
-	return b.Bytes()
-}
-
-// MarshalJSON implements json.Marshaler.
-func (ls Labels) MarshalJSON() ([]byte, error) {
-	return json.Marshal(ls.Map())
-}
-
-// UnmarshalJSON implements json.Unmarshaler.
-func (ls *Labels) UnmarshalJSON(b []byte) error {
-	var m map[string]string
-
-	if err := json.Unmarshal(b, &m); err != nil {
-		return err
-	}
-
-	*ls = FromMap(m)
-	return nil
-}
-
-// MarshalYAML implements yaml.Marshaler.
-func (ls Labels) MarshalYAML() (interface{}, error) {
-	return ls.Map(), nil
-}
-
-// UnmarshalYAML implements yaml.Unmarshaler.
-func (ls *Labels) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var m map[string]string
-
-	if err := unmarshal(&m); err != nil {
-		return err
-	}
-
-	*ls = FromMap(m)
-	return nil
-}
-
-// MatchLabels returns a subset of Labels that matches/does not match with the provided label names based on the 'on' boolean.
-// If on is set to true, it returns the subset of labels that match with the provided label names and its inverse when 'on' is set to false.
-func (ls Labels) MatchLabels(on bool, names ...string) Labels {
-	matchedLabels := Labels{}
-
-	nameSet := map[string]struct{}{}
-	for _, n := range names {
-		nameSet[n] = struct{}{}
-	}
-
-	for _, v := range ls {
-		if _, ok := nameSet[v.Name]; on == ok && (on || v.Name != MetricName) {
-			matchedLabels = append(matchedLabels, v)
-		}
-	}
-
-	return matchedLabels
-}
-
-// Hash returns a hash value for the label set.
-func (ls Labels) Hash() uint64 {
-	// Use xxhash.Sum64(b) for fast path as it's faster.
-	b := make([]byte, 0, 1024)
-	for i, v := range ls {
-		if len(b)+len(v.Name)+len(v.Value)+2 >= cap(b) {
-			// If labels entry is 1KB+ do not allocate whole entry.
-			h := xxhash.New()
-			_, _ = h.Write(b)
-			for _, v := range ls[i:] {
-				_, _ = h.WriteString(v.Name)
-				_, _ = h.Write(seps)
-				_, _ = h.WriteString(v.Value)
-				_, _ = h.Write(seps)
-			}
-			return h.Sum64()
-		}
-
-		b = append(b, v.Name...)
-		b = append(b, seps[0])
-		b = append(b, v.Value...)
-		b = append(b, seps[0])
-	}
-	return xxhash.Sum64(b)
-}
-
-// HashForLabels returns a hash value for the labels matching the provided names.
-// 'names' have to be sorted in ascending order.
-func (ls Labels) HashForLabels(b []byte, names ...string) (uint64, []byte) {
-	b = b[:0]
-	i, j := 0, 0
-	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
-			j++
-		} else if ls[i].Name < names[j] {
-			i++
-		} else {
-			b = append(b, ls[i].Name...)
-			b = append(b, seps[0])
-			b = append(b, ls[i].Value...)
-			b = append(b, seps[0])
-			i++
-			j++
-		}
-	}
-	return xxhash.Sum64(b), b
-}
-
-// HashWithoutLabels returns a hash value for all labels except those matching
-// the provided names.
-// 'names' have to be sorted in ascending order.
-func (ls Labels) HashWithoutLabels(b []byte, names ...string) (uint64, []byte) {
-	b = b[:0]
-	j := 0
-	for i := range ls {
-		for j < len(names) && names[j] < ls[i].Name {
-			j++
-		}
-		if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) {
-			continue
-		}
-		b = append(b, ls[i].Name...)
-		b = append(b, seps[0])
-		b = append(b, ls[i].Value...)
-		b = append(b, seps[0])
-	}
-	return xxhash.Sum64(b), b
-}
-
-// WithLabels returns a new labels.Labels from ls that only contains labels matching names.
-// 'names' have to be sorted in ascending order.
-func (ls Labels) WithLabels(names ...string) Labels {
-	ret := make([]Label, 0, len(ls))
-
-	i, j := 0, 0
-	for i < len(ls) && j < len(names) {
-		if names[j] < ls[i].Name {
-			j++
-		} else if ls[i].Name < names[j] {
-			i++
-		} else {
-			ret = append(ret, ls[i])
-			i++
-			j++
-		}
-	}
-	return ret
-}
-
-// WithoutLabels returns a new labels.Labels from ls that contains labels not matching names.
-// 'names' have to be sorted in ascending order.
-func (ls Labels) WithoutLabels(names ...string) Labels {
-	ret := make([]Label, 0, len(ls))
-
-	j := 0
-	for i := range ls {
-		for j < len(names) && names[j] < ls[i].Name {
-			j++
-		}
-		if ls[i].Name == MetricName || (j < len(names) && ls[i].Name == names[j]) {
-			continue
-		}
-		ret = append(ret, ls[i])
-	}
-	return ret
-}
-
-// Copy returns a copy of the labels.
-func (ls Labels) Copy() Labels {
-	res := make(Labels, len(ls))
-	copy(res, ls)
-	return res
-}
-
-// Get returns the value for the label with the given name.
-// Returns an empty string if the label doesn't exist.
-func (ls Labels) Get(name string) string {
-	for _, l := range ls {
-		if l.Name == name {
-			return l.Value
-		}
-	}
-	return ""
-}
-
-// Has returns true if the label with the given name is present.
-func (ls Labels) Has(name string) bool {
-	for _, l := range ls {
-		if l.Name == name {
-			return true
-		}
-	}
-	return false
-}
-
-// HasDuplicateLabelNames returns whether ls has duplicate label names.
-// It assumes that the labelset is sorted.
-func (ls Labels) HasDuplicateLabelNames() (string, bool) {
-	for i, l := range ls {
-		if i == 0 {
-			continue
-		}
-		if l.Name == ls[i-1].Name {
-			return l.Name, true
-		}
-	}
-	return "", false
-}
-
-// WithoutEmpty returns the labelset without empty labels.
-// May return the same labelset.
-func (ls Labels) WithoutEmpty() Labels {
-	for _, v := range ls {
-		if v.Value != "" {
-			continue
-		}
-		// Do not copy the slice until it's necessary.
-		els := make(Labels, 0, len(ls)-1)
-		for _, v := range ls {
-			if v.Value != "" {
-				els = append(els, v)
-			}
-		}
-		return els
-	}
-	return ls
-}
-
-// Equal returns whether the two label sets are equal.
-func Equal(ls, o Labels) bool {
-	if len(ls) != len(o) {
-		return false
-	}
-	for i, l := range ls {
-		if l.Name != o[i].Name || l.Value != o[i].Value {
-			return false
-		}
-	}
-	return true
-}
-
-// Map returns a string map of the labels.
-func (ls Labels) Map() map[string]string {
-	m := make(map[string]string, len(ls))
-	for _, l := range ls {
-		m[l.Name] = l.Value
-	}
-	return m
-}
-
-// New returns a sorted Labels from the given labels.
-// The caller has to guarantee that all label names are unique.
-func New(ls ...Label) Labels {
-	set := make(Labels, 0, len(ls))
-	for _, l := range ls {
-		set = append(set, l)
-	}
-	sort.Sort(set)
-
-	return set
-}
-
-// FromMap returns new sorted Labels from the given map.
-func FromMap(m map[string]string) Labels {
-	l := make([]Label, 0, len(m))
-	for k, v := range m {
-		l = append(l, Label{Name: k, Value: v})
-	}
-	return New(l...)
-}
-
-// FromStrings creates new labels from pairs of strings.
-func FromStrings(ss ...string) Labels {
-	if len(ss)%2 != 0 {
-		panic("invalid number of strings")
-	}
-	var res Labels
-	for i := 0; i < len(ss); i += 2 {
-		res = append(res, Label{Name: ss[i], Value: ss[i+1]})
-	}
-
-	sort.Sort(res)
-	return res
-}
-
-// Compare compares the two label sets.
-// The result will be 0 if a==b, <0 if a < b, and >0 if a > b.
-func Compare(a, b Labels) int {
-	l := len(a)
-	if len(b) < l {
-		l = len(b)
-	}
-
-	for i := 0; i < l; i++ {
-		if a[i].Name != b[i].Name {
-			if a[i].Name < b[i].Name {
-				return -1
-			}
-			return 1
-		}
-		if a[i].Value != b[i].Value {
-			if a[i].Value < b[i].Value {
-				return -1
-			}
-			return 1
-		}
-	}
-	// If all labels so far were in common, the set with fewer labels comes first.
-	return len(a) - len(b)
-}
-
-// Builder allows modifying Labels.
-type Builder struct {
-	base Labels
-	del  []string
-	add  []Label
-}
-
-// NewBuilder returns a new LabelsBuilder.
-func NewBuilder(base Labels) *Builder {
-	b := &Builder{
-		del: make([]string, 0, 5),
-		add: make([]Label, 0, 5),
-	}
-	b.Reset(base)
-	return b
-}
-
-// Reset clears all current state for the builder.
-func (b *Builder) Reset(base Labels) {
-	b.base = base
-	b.del = b.del[:0]
-	b.add = b.add[:0]
-	for _, l := range b.base {
-		if l.Value == "" {
-			b.del = append(b.del, l.Name)
-		}
-	}
-}
-
-// Del deletes the label of the given name.
-func (b *Builder) Del(ns ...string) *Builder {
-	for _, n := range ns {
-		for i, a := range b.add {
-			if a.Name == n {
-				b.add = append(b.add[:i], b.add[i+1:]...)
-			}
-		}
-		b.del = append(b.del, n)
-	}
-	return b
-}
-
-// Set the name/value pair as a label.
-func (b *Builder) Set(n, v string) *Builder {
-	if v == "" {
-		// Empty labels are the same as missing labels.
-		return b.Del(n)
-	}
-	for i, a := range b.add {
-		if a.Name == n {
-			b.add[i].Value = v
-			return b
-		}
-	}
-	b.add = append(b.add, Label{Name: n, Value: v})
-
-	return b
-}
-
-// Labels returns the labels from the builder. If no modifications
-// were made, the original labels are returned.
-func (b *Builder) Labels() Labels {
-	if len(b.del) == 0 && len(b.add) == 0 {
-		return b.base
-	}
-
-	// In the general case, labels are removed, modified or moved
-	// rather than added.
-	res := make(Labels, 0, len(b.base))
-Outer:
-	for _, l := range b.base {
-		for _, n := range b.del {
-			if l.Name == n {
-				continue Outer
-			}
-		}
-		for _, la := range b.add {
-			if l.Name == la.Name {
-				continue Outer
-			}
-		}
-		res = append(res, l)
-	}
-	res = append(res, b.add...)
-	sort.Sort(res)
-
-	return res
-}
diff -pruN 2.31.2+ds1-1/pkg/labels/labels_test.go 2.33.5+ds1-2/pkg/labels/labels_test.go
--- 2.31.2+ds1-1/pkg/labels/labels_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/labels/labels_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,737 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package labels
-
-import (
-	"fmt"
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestLabels_String(t *testing.T) {
-	cases := []struct {
-		lables   Labels
-		expected string
-	}{
-		{
-			lables: Labels{
-				{
-					Name:  "t1",
-					Value: "t1",
-				},
-				{
-					Name:  "t2",
-					Value: "t2",
-				},
-			},
-			expected: "{t1=\"t1\", t2=\"t2\"}",
-		},
-		{
-			lables:   Labels{},
-			expected: "{}",
-		},
-		{
-			lables:   nil,
-			expected: "{}",
-		},
-	}
-	for _, c := range cases {
-		str := c.lables.String()
-		require.Equal(t, c.expected, str)
-	}
-}
-
-func TestLabels_MatchLabels(t *testing.T) {
-	labels := Labels{
-		{
-			Name:  "__name__",
-			Value: "ALERTS",
-		},
-		{
-			Name:  "alertname",
-			Value: "HTTPRequestRateLow",
-		},
-		{
-			Name:  "alertstate",
-			Value: "pending",
-		},
-		{
-			Name:  "instance",
-			Value: "0",
-		},
-		{
-			Name:  "job",
-			Value: "app-server",
-		},
-		{
-			Name:  "severity",
-			Value: "critical",
-		},
-	}
-
-	tests := []struct {
-		providedNames []string
-		on            bool
-		expected      Labels
-	}{
-		// on = true, explicitly including metric name in matching.
-		{
-			providedNames: []string{
-				"__name__",
-				"alertname",
-				"alertstate",
-				"instance",
-			},
-			on: true,
-			expected: Labels{
-				{
-					Name:  "__name__",
-					Value: "ALERTS",
-				},
-				{
-					Name:  "alertname",
-					Value: "HTTPRequestRateLow",
-				},
-				{
-					Name:  "alertstate",
-					Value: "pending",
-				},
-				{
-					Name:  "instance",
-					Value: "0",
-				},
-			},
-		},
-		// on = false, explicitly excluding metric name from matching.
-		{
-			providedNames: []string{
-				"__name__",
-				"alertname",
-				"alertstate",
-				"instance",
-			},
-			on: false,
-			expected: Labels{
-				{
-					Name:  "job",
-					Value: "app-server",
-				},
-				{
-					Name:  "severity",
-					Value: "critical",
-				},
-			},
-		},
-		// on = true, explicitly excluding metric name from matching.
-		{
-			providedNames: []string{
-				"alertname",
-				"alertstate",
-				"instance",
-			},
-			on: true,
-			expected: Labels{
-				{
-					Name:  "alertname",
-					Value: "HTTPRequestRateLow",
-				},
-				{
-					Name:  "alertstate",
-					Value: "pending",
-				},
-				{
-					Name:  "instance",
-					Value: "0",
-				},
-			},
-		},
-		// on = false, implicitly excluding metric name from matching.
-		{
-			providedNames: []string{
-				"alertname",
-				"alertstate",
-				"instance",
-			},
-			on: false,
-			expected: Labels{
-				{
-					Name:  "job",
-					Value: "app-server",
-				},
-				{
-					Name:  "severity",
-					Value: "critical",
-				},
-			},
-		},
-	}
-
-	for i, test := range tests {
-		got := labels.MatchLabels(test.on, test.providedNames...)
-		require.Equal(t, test.expected, got, "unexpected labelset for test case %d", i)
-	}
-}
-
-func TestLabels_HasDuplicateLabelNames(t *testing.T) {
-	cases := []struct {
-		Input     Labels
-		Duplicate bool
-		LabelName string
-	}{
-		{
-			Input:     FromMap(map[string]string{"__name__": "up", "hostname": "localhost"}),
-			Duplicate: false,
-		}, {
-			Input: append(
-				FromMap(map[string]string{"__name__": "up", "hostname": "localhost"}),
-				FromMap(map[string]string{"hostname": "127.0.0.1"})...,
-			),
-			Duplicate: true,
-			LabelName: "hostname",
-		},
-	}
-
-	for i, c := range cases {
-		l, d := c.Input.HasDuplicateLabelNames()
-		require.Equal(t, c.Duplicate, d, "test %d: incorrect duplicate bool", i)
-		require.Equal(t, c.LabelName, l, "test %d: incorrect label name", i)
-	}
-}
-
-func TestLabels_WithoutEmpty(t *testing.T) {
-	for _, test := range []struct {
-		input    Labels
-		expected Labels
-	}{
-		{
-			input: Labels{
-				{Name: "foo"},
-				{Name: "bar"},
-			},
-			expected: Labels{},
-		},
-		{
-			input: Labels{
-				{Name: "foo"},
-				{Name: "bar"},
-				{Name: "baz"},
-			},
-			expected: Labels{},
-		},
-		{
-			input: Labels{
-				{Name: "__name__", Value: "test"},
-				{Name: "hostname", Value: "localhost"},
-				{Name: "job", Value: "check"},
-			},
-			expected: Labels{
-				{Name: "__name__", Value: "test"},
-				{Name: "hostname", Value: "localhost"},
-				{Name: "job", Value: "check"},
-			},
-		},
-		{
-			input: Labels{
-				{Name: "__name__", Value: "test"},
-				{Name: "hostname", Value: "localhost"},
-				{Name: "bar"},
-				{Name: "job", Value: "check"},
-			},
-			expected: Labels{
-				{Name: "__name__", Value: "test"},
-				{Name: "hostname", Value: "localhost"},
-				{Name: "job", Value: "check"},
-			},
-		},
-		{
-			input: Labels{
-				{Name: "__name__", Value: "test"},
-				{Name: "foo"},
-				{Name: "hostname", Value: "localhost"},
-				{Name: "bar"},
-				{Name: "job", Value: "check"},
-			},
-			expected: Labels{
-				{Name: "__name__", Value: "test"},
-				{Name: "hostname", Value: "localhost"},
-				{Name: "job", Value: "check"},
-			},
-		},
-		{
-			input: Labels{
-				{Name: "__name__", Value: "test"},
-				{Name: "foo"},
-				{Name: "baz"},
-				{Name: "hostname", Value: "localhost"},
-				{Name: "bar"},
-				{Name: "job", Value: "check"},
-			},
-			expected: Labels{
-				{Name: "__name__", Value: "test"},
-				{Name: "hostname", Value: "localhost"},
-				{Name: "job", Value: "check"},
-			},
-		},
-	} {
-		t.Run("", func(t *testing.T) {
-			require.Equal(t, test.expected, test.input.WithoutEmpty())
-		})
-	}
-}
-
-func TestLabels_Equal(t *testing.T) {
-	labels := Labels{
-		{
-			Name:  "aaa",
-			Value: "111",
-		},
-		{
-			Name:  "bbb",
-			Value: "222",
-		},
-	}
-
-	tests := []struct {
-		compared Labels
-		expected bool
-	}{
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bbb",
-					Value: "222",
-				},
-				{
-					Name:  "ccc",
-					Value: "333",
-				},
-			},
-			expected: false,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bar",
-					Value: "222",
-				},
-			},
-			expected: false,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bbb",
-					Value: "233",
-				},
-			},
-			expected: false,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bbb",
-					Value: "222",
-				},
-			},
-			expected: true,
-		},
-	}
-
-	for i, test := range tests {
-		got := Equal(labels, test.compared)
-		require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
-	}
-}
-
-func TestLabels_FromStrings(t *testing.T) {
-	labels := FromStrings("aaa", "111", "bbb", "222")
-	expected := Labels{
-		{
-			Name:  "aaa",
-			Value: "111",
-		},
-		{
-			Name:  "bbb",
-			Value: "222",
-		},
-	}
-
-	require.Equal(t, expected, labels, "unexpected labelset")
-
-	require.Panics(t, func() { FromStrings("aaa", "111", "bbb") }) //nolint:staticcheck // Ignore SA5012, error is intentional test.
-}
-
-func TestLabels_Compare(t *testing.T) {
-	labels := Labels{
-		{
-			Name:  "aaa",
-			Value: "111",
-		},
-		{
-			Name:  "bbb",
-			Value: "222",
-		},
-	}
-
-	tests := []struct {
-		compared Labels
-		expected int
-	}{
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "110",
-				},
-				{
-					Name:  "bbb",
-					Value: "222",
-				},
-			},
-			expected: 1,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bbb",
-					Value: "233",
-				},
-			},
-			expected: -1,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bar",
-					Value: "222",
-				},
-			},
-			expected: 1,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bbc",
-					Value: "222",
-				},
-			},
-			expected: -1,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-			},
-			expected: 1,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bbb",
-					Value: "222",
-				},
-				{
-					Name:  "ccc",
-					Value: "333",
-				},
-				{
-					Name:  "ddd",
-					Value: "444",
-				},
-			},
-			expected: -2,
-		},
-		{
-			compared: Labels{
-				{
-					Name:  "aaa",
-					Value: "111",
-				},
-				{
-					Name:  "bbb",
-					Value: "222",
-				},
-			},
-			expected: 0,
-		},
-	}
-
-	for i, test := range tests {
-		got := Compare(labels, test.compared)
-		require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
-	}
-}
-
-func TestLabels_Has(t *testing.T) {
-	tests := []struct {
-		input    string
-		expected bool
-	}{
-		{
-			input:    "foo",
-			expected: false,
-		},
-		{
-			input:    "aaa",
-			expected: true,
-		},
-	}
-
-	labelsSet := Labels{
-		{
-			Name:  "aaa",
-			Value: "111",
-		},
-		{
-			Name:  "bbb",
-			Value: "222",
-		},
-	}
-
-	for i, test := range tests {
-		got := labelsSet.Has(test.input)
-		require.Equal(t, test.expected, got, "unexpected comparison result for test case %d", i)
-	}
-}
-
-func TestLabels_Get(t *testing.T) {
-	require.Equal(t, "", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("foo"))
-	require.Equal(t, "111", Labels{{"aaa", "111"}, {"bbb", "222"}}.Get("aaa"))
-}
-
-// BenchmarkLabels_Get was written to check whether a binary search can improve the performance vs the linear search implementation
-// The results have shown that binary search would only be better when searching last labels in scenarios with more than 10 labels.
-// In the following list, `old` is the linear search while `new` is the binary search implementaiton (without calling sort.Search, which performs even worse here)
-// name                                        old time/op    new time/op    delta
-// Labels_Get/with_5_labels/get_first_label      5.12ns ± 0%   14.24ns ± 0%   ~     (p=1.000 n=1+1)
-// Labels_Get/with_5_labels/get_middle_label     13.5ns ± 0%    18.5ns ± 0%   ~     (p=1.000 n=1+1)
-// Labels_Get/with_5_labels/get_last_label       21.9ns ± 0%    18.9ns ± 0%   ~     (p=1.000 n=1+1)
-// Labels_Get/with_10_labels/get_first_label     5.11ns ± 0%   19.47ns ± 0%   ~     (p=1.000 n=1+1)
-// Labels_Get/with_10_labels/get_middle_label    26.2ns ± 0%    19.3ns ± 0%   ~     (p=1.000 n=1+1)
-// Labels_Get/with_10_labels/get_last_label      42.8ns ± 0%    23.4ns ± 0%   ~     (p=1.000 n=1+1)
-// Labels_Get/with_30_labels/get_first_label     5.10ns ± 0%   24.63ns ± 0%   ~     (p=1.000 n=1+1)
-// Labels_Get/with_30_labels/get_middle_label    75.8ns ± 0%    29.7ns ± 0%   ~     (p=1.000 n=1+1)
-// Labels_Get/with_30_labels/get_last_label       169ns ± 0%      29ns ± 0%   ~     (p=1.000 n=1+1)
-func BenchmarkLabels_Get(b *testing.B) {
-	maxLabels := 30
-	allLabels := make(Labels, maxLabels)
-	for i := 0; i < maxLabels; i++ {
-		allLabels[i] = Label{Name: strings.Repeat(string('a'+byte(i)), 5)}
-	}
-	for _, size := range []int{5, 10, maxLabels} {
-		b.Run(fmt.Sprintf("with %d labels", size), func(b *testing.B) {
-			labels := allLabels[:size]
-			for _, scenario := range []struct {
-				desc, label string
-			}{
-				{"get first label", labels[0].Name},
-				{"get middle label", labels[size/2].Name},
-				{"get last label", labels[size-1].Name},
-			} {
-				b.Run(scenario.desc, func(b *testing.B) {
-					b.ResetTimer()
-					for i := 0; i < b.N; i++ {
-						_ = labels.Get(scenario.label)
-					}
-				})
-			}
-		})
-	}
-}
-
-func TestLabels_Copy(t *testing.T) {
-	require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Copy())
-}
-
-func TestLabels_Map(t *testing.T) {
-	require.Equal(t, map[string]string{"aaa": "111", "bbb": "222"}, Labels{{"aaa", "111"}, {"bbb", "222"}}.Map())
-}
-
-func TestLabels_WithLabels(t *testing.T) {
-	require.Equal(t, Labels{{"aaa", "111"}, {"bbb", "222"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithLabels("aaa", "bbb"))
-}
-
-func TestLabels_WithoutLabels(t *testing.T) {
-	require.Equal(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}}.WithoutLabels("bbb", "ccc"))
-	require.Equal(t, Labels{{"aaa", "111"}}, Labels{{"aaa", "111"}, {"bbb", "222"}, {MetricName, "333"}}.WithoutLabels("bbb"))
-}
-
-func TestBulider_NewBulider(t *testing.T) {
-	require.Equal(
-		t,
-		&Builder{
-			base: Labels{{"aaa", "111"}},
-			del:  []string{},
-			add:  []Label{},
-		},
-		NewBuilder(Labels{{"aaa", "111"}}),
-	)
-}
-
-func TestBuilder_Del(t *testing.T) {
-	require.Equal(
-		t,
-		&Builder{
-			del: []string{"bbb"},
-			add: []Label{{"aaa", "111"}, {"ccc", "333"}},
-		},
-		(&Builder{
-			del: []string{},
-			add: []Label{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}},
-		}).Del("bbb"),
-	)
-}
-
-func TestBuilder_Set(t *testing.T) {
-	require.Equal(
-		t,
-		&Builder{
-			base: Labels{{"aaa", "111"}},
-			del:  []string{},
-			add:  []Label{{"bbb", "222"}},
-		},
-		(&Builder{
-			base: Labels{{"aaa", "111"}},
-			del:  []string{},
-			add:  []Label{},
-		}).Set("bbb", "222"),
-	)
-
-	require.Equal(
-		t,
-		&Builder{
-			base: Labels{{"aaa", "111"}},
-			del:  []string{},
-			add:  []Label{{"bbb", "333"}},
-		},
-		(&Builder{
-			base: Labels{{"aaa", "111"}},
-			del:  []string{},
-			add:  []Label{{"bbb", "222"}},
-		}).Set("bbb", "333"),
-	)
-}
-
-func TestBuilder_Labels(t *testing.T) {
-	require.Equal(
-		t,
-		Labels{{"aaa", "111"}, {"ccc", "333"}, {"ddd", "444"}},
-		(&Builder{
-			base: Labels{{"aaa", "111"}, {"bbb", "222"}, {"ccc", "333"}},
-			del:  []string{"bbb"},
-			add:  []Label{{"ddd", "444"}},
-		}).Labels(),
-	)
-}
-
-func TestLabels_Hash(t *testing.T) {
-	lbls := Labels{
-		{Name: "foo", Value: "bar"},
-		{Name: "baz", Value: "qux"},
-	}
-	require.Equal(t, lbls.Hash(), lbls.Hash())
-	require.NotEqual(t, lbls.Hash(), Labels{lbls[1], lbls[0]}.Hash(), "unordered labels match.")
-	require.NotEqual(t, lbls.Hash(), Labels{lbls[0]}.Hash(), "different labels match.")
-}
-
-var benchmarkLabelsResult uint64
-
-func BenchmarkLabels_Hash(b *testing.B) {
-	for _, tcase := range []struct {
-		name string
-		lbls Labels
-	}{
-		{
-			name: "typical labels under 1KB",
-			lbls: func() Labels {
-				lbls := make(Labels, 10)
-				for i := 0; i < len(lbls); i++ {
-					// Label ~20B name, 50B value.
-					lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
-				}
-				return lbls
-			}(),
-		},
-		{
-			name: "bigger labels over 1KB",
-			lbls: func() Labels {
-				lbls := make(Labels, 10)
-				for i := 0; i < len(lbls); i++ {
-					//Label ~50B name, 50B value.
-					lbls[i] = Label{Name: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i), Value: fmt.Sprintf("abcdefghijabcdefghijabcdefghijabcdefghijabcdefghij%d", i)}
-				}
-				return lbls
-			}(),
-		},
-		{
-			name: "extremely large label value 10MB",
-			lbls: func() Labels {
-				lbl := &strings.Builder{}
-				lbl.Grow(1024 * 1024 * 10) // 10MB.
-				word := "abcdefghij"
-				for i := 0; i < lbl.Cap()/len(word); i++ {
-					_, _ = lbl.WriteString(word)
-				}
-				return Labels{{Name: "__name__", Value: lbl.String()}}
-			}(),
-		},
-	} {
-		b.Run(tcase.name, func(b *testing.B) {
-			var h uint64
-
-			b.ReportAllocs()
-			b.ResetTimer()
-			for i := 0; i < b.N; i++ {
-				h = tcase.lbls.Hash()
-			}
-			benchmarkLabelsResult = h
-		})
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/labels/matcher.go 2.33.5+ds1-2/pkg/labels/matcher.go
--- 2.31.2+ds1-1/pkg/labels/matcher.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/labels/matcher.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,120 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package labels
-
-import (
-	"fmt"
-)
-
-// MatchType is an enum for label matching types.
-type MatchType int
-
-// Possible MatchTypes.
-const (
-	MatchEqual MatchType = iota
-	MatchNotEqual
-	MatchRegexp
-	MatchNotRegexp
-)
-
-var matchTypeToStr = [...]string{
-	MatchEqual:     "=",
-	MatchNotEqual:  "!=",
-	MatchRegexp:    "=~",
-	MatchNotRegexp: "!~",
-}
-
-func (m MatchType) String() string {
-	if m < MatchEqual || m > MatchNotRegexp {
-		panic("unknown match type")
-	}
-	return matchTypeToStr[m]
-}
-
-// Matcher models the matching of a label.
-type Matcher struct {
-	Type  MatchType
-	Name  string
-	Value string
-
-	re *FastRegexMatcher
-}
-
-// NewMatcher returns a matcher object.
-func NewMatcher(t MatchType, n, v string) (*Matcher, error) {
-	m := &Matcher{
-		Type:  t,
-		Name:  n,
-		Value: v,
-	}
-	if t == MatchRegexp || t == MatchNotRegexp {
-		re, err := NewFastRegexMatcher(v)
-		if err != nil {
-			return nil, err
-		}
-		m.re = re
-	}
-	return m, nil
-}
-
-// MustNewMatcher panics on error - only for use in tests!
-func MustNewMatcher(mt MatchType, name, val string) *Matcher {
-	m, err := NewMatcher(mt, name, val)
-	if err != nil {
-		panic(err)
-	}
-	return m
-}
-
-func (m *Matcher) String() string {
-	return fmt.Sprintf("%s%s%q", m.Name, m.Type, m.Value)
-}
-
-// Matches returns whether the matcher matches the given string value.
-func (m *Matcher) Matches(s string) bool {
-	switch m.Type {
-	case MatchEqual:
-		return s == m.Value
-	case MatchNotEqual:
-		return s != m.Value
-	case MatchRegexp:
-		return m.re.MatchString(s)
-	case MatchNotRegexp:
-		return !m.re.MatchString(s)
-	}
-	panic("labels.Matcher.Matches: invalid match type")
-}
-
-// Inverse returns a matcher that matches the opposite.
-func (m *Matcher) Inverse() (*Matcher, error) {
-	switch m.Type {
-	case MatchEqual:
-		return NewMatcher(MatchNotEqual, m.Name, m.Value)
-	case MatchNotEqual:
-		return NewMatcher(MatchEqual, m.Name, m.Value)
-	case MatchRegexp:
-		return NewMatcher(MatchNotRegexp, m.Name, m.Value)
-	case MatchNotRegexp:
-		return NewMatcher(MatchRegexp, m.Name, m.Value)
-	}
-	panic("labels.Matcher.Matches: invalid match type")
-}
-
-// GetRegexString returns the regex string.
-func (m *Matcher) GetRegexString() string {
-	if m.re == nil {
-		return ""
-	}
-	return m.re.GetRegexString()
-}
diff -pruN 2.31.2+ds1-1/pkg/labels/matcher_test.go 2.33.5+ds1-2/pkg/labels/matcher_test.go
--- 2.31.2+ds1-1/pkg/labels/matcher_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/labels/matcher_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,125 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package labels
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func mustNewMatcher(t *testing.T, mType MatchType, value string) *Matcher {
-	m, err := NewMatcher(mType, "", value)
-	require.NoError(t, err)
-	return m
-}
-
-func TestMatcher(t *testing.T) {
-	tests := []struct {
-		matcher *Matcher
-		value   string
-		match   bool
-	}{
-		{
-			matcher: mustNewMatcher(t, MatchEqual, "bar"),
-			value:   "bar",
-			match:   true,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchEqual, "bar"),
-			value:   "foo-bar",
-			match:   false,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchNotEqual, "bar"),
-			value:   "bar",
-			match:   false,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchNotEqual, "bar"),
-			value:   "foo-bar",
-			match:   true,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchRegexp, "bar"),
-			value:   "bar",
-			match:   true,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchRegexp, "bar"),
-			value:   "foo-bar",
-			match:   false,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchRegexp, ".*bar"),
-			value:   "foo-bar",
-			match:   true,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchNotRegexp, "bar"),
-			value:   "bar",
-			match:   false,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchNotRegexp, "bar"),
-			value:   "foo-bar",
-			match:   true,
-		},
-		{
-			matcher: mustNewMatcher(t, MatchNotRegexp, ".*bar"),
-			value:   "foo-bar",
-			match:   false,
-		},
-	}
-
-	for _, test := range tests {
-		require.Equal(t, test.matcher.Matches(test.value), test.match)
-	}
-}
-
-func TestInverse(t *testing.T) {
-	tests := []struct {
-		matcher  *Matcher
-		expected *Matcher
-	}{
-		{
-			matcher:  &Matcher{Type: MatchEqual, Name: "name1", Value: "value1"},
-			expected: &Matcher{Type: MatchNotEqual, Name: "name1", Value: "value1"},
-		},
-		{
-			matcher:  &Matcher{Type: MatchNotEqual, Name: "name2", Value: "value2"},
-			expected: &Matcher{Type: MatchEqual, Name: "name2", Value: "value2"},
-		},
-		{
-			matcher:  &Matcher{Type: MatchRegexp, Name: "name3", Value: "value3"},
-			expected: &Matcher{Type: MatchNotRegexp, Name: "name3", Value: "value3"},
-		},
-		{
-			matcher:  &Matcher{Type: MatchNotRegexp, Name: "name4", Value: "value4"},
-			expected: &Matcher{Type: MatchRegexp, Name: "name4", Value: "value4"},
-		},
-	}
-
-	for _, test := range tests {
-		result, err := test.matcher.Inverse()
-		require.NoError(t, err)
-		require.Equal(t, test.expected.Type, result.Type)
-	}
-}
-
-func BenchmarkMatchType_String(b *testing.B) {
-	for i := 0; i <= b.N; i++ {
-		_ = MatchType(i % int(MatchNotRegexp+1)).String()
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/labels/regexp.go 2.33.5+ds1-2/pkg/labels/regexp.go
--- 2.31.2+ds1-1/pkg/labels/regexp.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/labels/regexp.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,107 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package labels
-
-import (
-	"regexp"
-	"regexp/syntax"
-	"strings"
-)
-
-type FastRegexMatcher struct {
-	re       *regexp.Regexp
-	prefix   string
-	suffix   string
-	contains string
-}
-
-func NewFastRegexMatcher(v string) (*FastRegexMatcher, error) {
-	re, err := regexp.Compile("^(?:" + v + ")$")
-	if err != nil {
-		return nil, err
-	}
-
-	parsed, err := syntax.Parse(v, syntax.Perl)
-	if err != nil {
-		return nil, err
-	}
-
-	m := &FastRegexMatcher{
-		re: re,
-	}
-
-	if parsed.Op == syntax.OpConcat {
-		m.prefix, m.suffix, m.contains = optimizeConcatRegex(parsed)
-	}
-
-	return m, nil
-}
-
-func (m *FastRegexMatcher) MatchString(s string) bool {
-	if m.prefix != "" && !strings.HasPrefix(s, m.prefix) {
-		return false
-	}
-	if m.suffix != "" && !strings.HasSuffix(s, m.suffix) {
-		return false
-	}
-	if m.contains != "" && !strings.Contains(s, m.contains) {
-		return false
-	}
-	return m.re.MatchString(s)
-}
-
-func (m *FastRegexMatcher) GetRegexString() string {
-	return m.re.String()
-}
-
-// optimizeConcatRegex returns literal prefix/suffix text that can be safely
-// checked against the label value before running the regexp matcher.
-func optimizeConcatRegex(r *syntax.Regexp) (prefix, suffix, contains string) {
-	sub := r.Sub
-
-	// We can safely remove begin and end text matchers respectively
-	// at the beginning and end of the regexp.
-	if len(sub) > 0 && sub[0].Op == syntax.OpBeginText {
-		sub = sub[1:]
-	}
-	if len(sub) > 0 && sub[len(sub)-1].Op == syntax.OpEndText {
-		sub = sub[:len(sub)-1]
-	}
-
-	if len(sub) == 0 {
-		return
-	}
-
-	// Given Prometheus regex matchers are always anchored to the begin/end
-	// of the text, if the first/last operations are literals, we can safely
-	// treat them as prefix/suffix.
-	if sub[0].Op == syntax.OpLiteral && (sub[0].Flags&syntax.FoldCase) == 0 {
-		prefix = string(sub[0].Rune)
-	}
-	if last := len(sub) - 1; sub[last].Op == syntax.OpLiteral && (sub[last].Flags&syntax.FoldCase) == 0 {
-		suffix = string(sub[last].Rune)
-	}
-
-	// If contains any literal which is not a prefix/suffix, we keep the
-	// 1st one. We do not keep the whole list of literals to simplify the
-	// fast path.
-	for i := 1; i < len(sub)-1; i++ {
-		if sub[i].Op == syntax.OpLiteral && (sub[i].Flags&syntax.FoldCase) == 0 {
-			contains = string(sub[i].Rune)
-			break
-		}
-	}
-
-	return
-}
diff -pruN 2.31.2+ds1-1/pkg/labels/regexp_test.go 2.33.5+ds1-2/pkg/labels/regexp_test.go
--- 2.31.2+ds1-1/pkg/labels/regexp_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/labels/regexp_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,98 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package labels
-
-import (
-	"regexp/syntax"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestNewFastRegexMatcher(t *testing.T) {
-	cases := []struct {
-		regex    string
-		value    string
-		expected bool
-	}{
-		{regex: "(foo|bar)", value: "foo", expected: true},
-		{regex: "(foo|bar)", value: "foo bar", expected: false},
-		{regex: "(foo|bar)", value: "bar", expected: true},
-		{regex: "foo.*", value: "foo bar", expected: true},
-		{regex: "foo.*", value: "bar foo", expected: false},
-		{regex: ".*foo", value: "foo bar", expected: false},
-		{regex: ".*foo", value: "bar foo", expected: true},
-		{regex: ".*foo", value: "foo", expected: true},
-		{regex: "^.*foo$", value: "foo", expected: true},
-		{regex: "^.+foo$", value: "foo", expected: false},
-		{regex: "^.+foo$", value: "bfoo", expected: true},
-		{regex: ".*", value: "\n", expected: false},
-		{regex: ".*", value: "\nfoo", expected: false},
-		{regex: ".*foo", value: "\nfoo", expected: false},
-		{regex: "foo.*", value: "foo\n", expected: false},
-		{regex: "foo\n.*", value: "foo\n", expected: true},
-		{regex: ".*foo.*", value: "foo", expected: true},
-		{regex: ".*foo.*", value: "foo bar", expected: true},
-		{regex: ".*foo.*", value: "hello foo world", expected: true},
-		{regex: ".*foo.*", value: "hello foo\n world", expected: false},
-		{regex: ".*foo\n.*", value: "hello foo\n world", expected: true},
-		{regex: ".*", value: "foo", expected: true},
-		{regex: "", value: "foo", expected: false},
-		{regex: "", value: "", expected: true},
-	}
-
-	for _, c := range cases {
-		m, err := NewFastRegexMatcher(c.regex)
-		require.NoError(t, err)
-		require.Equal(t, c.expected, m.MatchString(c.value))
-	}
-}
-
-func TestOptimizeConcatRegex(t *testing.T) {
-	cases := []struct {
-		regex    string
-		prefix   string
-		suffix   string
-		contains string
-	}{
-		{regex: "foo(hello|bar)", prefix: "foo", suffix: "", contains: ""},
-		{regex: "foo(hello|bar)world", prefix: "foo", suffix: "world", contains: ""},
-		{regex: "foo.*", prefix: "foo", suffix: "", contains: ""},
-		{regex: "foo.*hello.*bar", prefix: "foo", suffix: "bar", contains: "hello"},
-		{regex: ".*foo", prefix: "", suffix: "foo", contains: ""},
-		{regex: "^.*foo$", prefix: "", suffix: "foo", contains: ""},
-		{regex: ".*foo.*", prefix: "", suffix: "", contains: "foo"},
-		{regex: ".*foo.*bar.*", prefix: "", suffix: "", contains: "foo"},
-		{regex: ".*(foo|bar).*", prefix: "", suffix: "", contains: ""},
-		{regex: ".*[abc].*", prefix: "", suffix: "", contains: ""},
-		{regex: ".*((?i)abc).*", prefix: "", suffix: "", contains: ""},
-		{regex: ".*(?i:abc).*", prefix: "", suffix: "", contains: ""},
-		{regex: "(?i:abc).*", prefix: "", suffix: "", contains: ""},
-		{regex: ".*(?i:abc)", prefix: "", suffix: "", contains: ""},
-		{regex: ".*(?i:abc)def.*", prefix: "", suffix: "", contains: "def"},
-		{regex: "(?i).*(?-i:abc)def", prefix: "", suffix: "", contains: "abc"},
-		{regex: ".*(?msU:abc).*", prefix: "", suffix: "", contains: "abc"},
-		{regex: "[aA]bc.*", prefix: "", suffix: "", contains: "bc"},
-	}
-
-	for _, c := range cases {
-		parsed, err := syntax.Parse(c.regex, syntax.Perl)
-		require.NoError(t, err)
-
-		prefix, suffix, contains := optimizeConcatRegex(parsed)
-		require.Equal(t, c.prefix, prefix)
-		require.Equal(t, c.suffix, suffix)
-		require.Equal(t, c.contains, contains)
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/labels/test_utils.go 2.33.5+ds1-2/pkg/labels/test_utils.go
--- 2.31.2+ds1-1/pkg/labels/test_utils.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/labels/test_utils.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,87 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package labels
-
-import (
-	"bufio"
-	"os"
-	"sort"
-	"strings"
-
-	"github.com/pkg/errors"
-)
-
-// Slice is a sortable slice of label sets.
-type Slice []Labels
-
-func (s Slice) Len() int           { return len(s) }
-func (s Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
-func (s Slice) Less(i, j int) bool { return Compare(s[i], s[j]) < 0 }
-
-// Selector holds constraints for matching against a label set.
-type Selector []*Matcher
-
-// Matches returns whether the labels satisfy all matchers.
-func (s Selector) Matches(labels Labels) bool {
-	for _, m := range s {
-		if v := labels.Get(m.Name); !m.Matches(v) {
-			return false
-		}
-	}
-	return true
-}
-
-// ReadLabels reads up to n label sets in a JSON formatted file fn. It is mostly useful
-// to load testing data.
-func ReadLabels(fn string, n int) ([]Labels, error) {
-	f, err := os.Open(fn)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close()
-
-	scanner := bufio.NewScanner(f)
-
-	var mets []Labels
-	hashes := map[uint64]struct{}{}
-	i := 0
-
-	for scanner.Scan() && i < n {
-		m := make(Labels, 0, 10)
-
-		r := strings.NewReplacer("\"", "", "{", "", "}", "")
-		s := r.Replace(scanner.Text())
-
-		labelChunks := strings.Split(s, ",")
-		for _, labelChunk := range labelChunks {
-			split := strings.Split(labelChunk, ":")
-			m = append(m, Label{Name: split[0], Value: split[1]})
-		}
-		// Order of the k/v labels matters, don't assume we'll always receive them already sorted.
-		sort.Sort(m)
-
-		h := m.Hash()
-		if _, ok := hashes[h]; ok {
-			continue
-		}
-		mets = append(mets, m)
-		hashes[h] = struct{}{}
-		i++
-	}
-
-	if i != n {
-		return mets, errors.Errorf("requested %d metrics but found %d", n, i)
-	}
-	return mets, nil
-}
diff -pruN 2.31.2+ds1-1/pkg/logging/dedupe.go 2.33.5+ds1-2/pkg/logging/dedupe.go
--- 2.31.2+ds1-1/pkg/logging/dedupe.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/logging/dedupe.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,130 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logging
-
-import (
-	"bytes"
-	"sync"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/go-logfmt/logfmt"
-)
-
-const (
-	garbageCollectEvery = 10 * time.Second
-	expireEntriesAfter  = 1 * time.Minute
-	maxEntries          = 1024
-)
-
-type logfmtEncoder struct {
-	*logfmt.Encoder
-	buf bytes.Buffer
-}
-
-var logfmtEncoderPool = sync.Pool{
-	New: func() interface{} {
-		var enc logfmtEncoder
-		enc.Encoder = logfmt.NewEncoder(&enc.buf)
-		return &enc
-	},
-}
-
-// Deduper implement log.Logger, dedupes log lines.
-type Deduper struct {
-	next   log.Logger
-	repeat time.Duration
-	quit   chan struct{}
-	mtx    sync.RWMutex
-	seen   map[string]time.Time
-}
-
-// Dedupe log lines to next, only repeating every repeat duration.
-func Dedupe(next log.Logger, repeat time.Duration) *Deduper {
-	d := &Deduper{
-		next:   next,
-		repeat: repeat,
-		quit:   make(chan struct{}),
-		seen:   map[string]time.Time{},
-	}
-	go d.run()
-	return d
-}
-
-// Stop the Deduper.
-func (d *Deduper) Stop() {
-	close(d.quit)
-}
-
-func (d *Deduper) run() {
-	ticker := time.NewTicker(garbageCollectEvery)
-	defer ticker.Stop()
-
-	for {
-		select {
-		case <-ticker.C:
-			d.mtx.Lock()
-			now := time.Now()
-			for line, seen := range d.seen {
-				if now.Sub(seen) > expireEntriesAfter {
-					delete(d.seen, line)
-				}
-			}
-			d.mtx.Unlock()
-		case <-d.quit:
-			return
-		}
-	}
-}
-
-// Log implements log.Logger.
-func (d *Deduper) Log(keyvals ...interface{}) error {
-	line, err := encode(keyvals...)
-	if err != nil {
-		return err
-	}
-
-	d.mtx.RLock()
-	last, ok := d.seen[line]
-	d.mtx.RUnlock()
-
-	if ok && time.Since(last) < d.repeat {
-		return nil
-	}
-
-	d.mtx.Lock()
-	if len(d.seen) < maxEntries {
-		d.seen[line] = time.Now()
-	}
-	d.mtx.Unlock()
-
-	return d.next.Log(keyvals...)
-}
-
-func encode(keyvals ...interface{}) (string, error) {
-	enc := logfmtEncoderPool.Get().(*logfmtEncoder)
-	enc.buf.Reset()
-	defer logfmtEncoderPool.Put(enc)
-
-	if err := enc.EncodeKeyvals(keyvals...); err != nil {
-		return "", err
-	}
-
-	// Add newline to the end of the buffer
-	if err := enc.EndRecord(); err != nil {
-		return "", err
-	}
-
-	return enc.buf.String(), nil
-}
diff -pruN 2.31.2+ds1-1/pkg/logging/dedupe_test.go 2.33.5+ds1-2/pkg/logging/dedupe_test.go
--- 2.31.2+ds1-1/pkg/logging/dedupe_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/logging/dedupe_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,47 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logging
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/require"
-)
-
-type counter int
-
-func (c *counter) Log(keyvals ...interface{}) error {
-	(*c)++
-	return nil
-}
-
-func TestDedupe(t *testing.T) {
-	var c counter
-	d := Dedupe(&c, 100*time.Millisecond)
-	defer d.Stop()
-
-	// Log 10 times quickly, ensure they are deduped.
-	for i := 0; i < 10; i++ {
-		err := d.Log("msg", "hello")
-		require.NoError(t, err)
-	}
-	require.Equal(t, 1, int(c))
-
-	// Wait, then log again, make sure it is logged.
-	time.Sleep(200 * time.Millisecond)
-	err := d.Log("msg", "hello")
-	require.NoError(t, err)
-	require.Equal(t, 2, int(c))
-}
diff -pruN 2.31.2+ds1-1/pkg/logging/file.go 2.33.5+ds1-2/pkg/logging/file.go
--- 2.31.2+ds1-1/pkg/logging/file.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/logging/file.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,62 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logging
-
-import (
-	"os"
-	"time"
-
-	"github.com/go-kit/log"
-	"github.com/pkg/errors"
-)
-
-var (
-	timestampFormat = log.TimestampFormat(
-		func() time.Time { return time.Now().UTC() },
-		"2006-01-02T15:04:05.000Z07:00",
-	)
-)
-
-// JSONFileLogger represents a logger that writes JSON to a file.
-type JSONFileLogger struct {
-	logger log.Logger
-	file   *os.File
-}
-
-// NewJSONFileLogger returns a new JSONFileLogger.
-func NewJSONFileLogger(s string) (*JSONFileLogger, error) {
-	if s == "" {
-		return nil, nil
-	}
-
-	f, err := os.OpenFile(s, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0666)
-	if err != nil {
-		return nil, errors.Wrap(err, "can't create json logger")
-	}
-
-	return &JSONFileLogger{
-		logger: log.With(log.NewJSONLogger(f), "ts", timestampFormat),
-		file:   f,
-	}, nil
-}
-
-// Close closes the underlying file.
-func (l *JSONFileLogger) Close() error {
-	return l.file.Close()
-}
-
-// Log calls the Log function of the underlying logger.
-func (l *JSONFileLogger) Log(i ...interface{}) error {
-	return l.logger.Log(i...)
-}
diff -pruN 2.31.2+ds1-1/pkg/logging/file_test.go 2.33.5+ds1-2/pkg/logging/file_test.go
--- 2.31.2+ds1-1/pkg/logging/file_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/logging/file_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,90 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logging
-
-import (
-	"io/ioutil"
-	"os"
-	"regexp"
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestJSONFileLogger_basic(t *testing.T) {
-	f, err := ioutil.TempFile("", "logging")
-	require.NoError(t, err)
-	defer func() {
-		require.NoError(t, f.Close())
-		require.NoError(t, os.Remove(f.Name()))
-	}()
-
-	l, err := NewJSONFileLogger(f.Name())
-	require.NoError(t, err)
-	require.NotNil(t, l, "logger can't be nil")
-
-	err = l.Log("test", "yes")
-	require.NoError(t, err)
-	r := make([]byte, 1024)
-	_, err = f.Read(r)
-	require.NoError(t, err)
-	result, err := regexp.Match(`^{"test":"yes","ts":"[^"]+"}\n`, r)
-	require.NoError(t, err)
-	require.True(t, result, "unexpected content: %s", r)
-
-	err = l.Close()
-	require.NoError(t, err)
-
-	err = l.file.Close()
-	require.Error(t, err)
-	require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
-}
-
-func TestJSONFileLogger_parallel(t *testing.T) {
-	f, err := ioutil.TempFile("", "logging")
-	require.NoError(t, err)
-	defer func() {
-		require.NoError(t, f.Close())
-		require.NoError(t, os.Remove(f.Name()))
-	}()
-
-	l, err := NewJSONFileLogger(f.Name())
-	require.NoError(t, err)
-	require.NotNil(t, l, "logger can't be nil")
-
-	err = l.Log("test", "yes")
-	require.NoError(t, err)
-
-	l2, err := NewJSONFileLogger(f.Name())
-	require.NoError(t, err)
-	require.NotNil(t, l, "logger can't be nil")
-
-	err = l2.Log("test", "yes")
-	require.NoError(t, err)
-
-	err = l.Close()
-	require.NoError(t, err)
-
-	err = l.file.Close()
-	require.Error(t, err)
-	require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
-
-	err = l2.Close()
-	require.NoError(t, err)
-
-	err = l2.file.Close()
-	require.Error(t, err)
-	require.True(t, strings.HasSuffix(err.Error(), os.ErrClosed.Error()), "file not closed")
-}
diff -pruN 2.31.2+ds1-1/pkg/logging/ratelimit.go 2.33.5+ds1-2/pkg/logging/ratelimit.go
--- 2.31.2+ds1-1/pkg/logging/ratelimit.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/logging/ratelimit.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,39 +0,0 @@
-// Copyright 2019 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package logging
-
-import (
-	"github.com/go-kit/log"
-	"golang.org/x/time/rate"
-)
-
-type ratelimiter struct {
-	limiter *rate.Limiter
-	next    log.Logger
-}
-
-// RateLimit write to a logger.
-func RateLimit(next log.Logger, limit rate.Limit) log.Logger {
-	return &ratelimiter{
-		limiter: rate.NewLimiter(limit, int(limit)),
-		next:    next,
-	}
-}
-
-func (r *ratelimiter) Log(keyvals ...interface{}) error {
-	if r.limiter.Allow() {
-		return r.next.Log(keyvals...)
-	}
-	return nil
-}
diff -pruN 2.31.2+ds1-1/pkg/modtimevfs/modtimevfs.go 2.33.5+ds1-2/pkg/modtimevfs/modtimevfs.go
--- 2.31.2+ds1-1/pkg/modtimevfs/modtimevfs.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/modtimevfs/modtimevfs.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,67 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package modtimevfs implements a virtual file system that returns a fixed
-// modification time for all files and directories.
-package modtimevfs
-
-import (
-	"net/http"
-	"os"
-	"time"
-)
-
-type timefs struct {
-	fs http.FileSystem
-	t  time.Time
-}
-
-// New returns a file system that returns constant modification time for all files.
-func New(fs http.FileSystem, t time.Time) http.FileSystem {
-	return &timefs{fs: fs, t: t}
-}
-
-type file struct {
-	http.File
-	os.FileInfo
-	t time.Time
-}
-
-func (t *timefs) Open(name string) (http.File, error) {
-	f, err := t.fs.Open(name)
-	if err != nil {
-		return f, err
-	}
-	defer func() {
-		if err != nil {
-			f.Close()
-		}
-	}()
-
-	fstat, err := f.Stat()
-	if err != nil {
-		return nil, err
-	}
-
-	return &file{f, fstat, t.t}, nil
-}
-
-// Stat implements the http.File interface.
-func (f *file) Stat() (os.FileInfo, error) {
-	return f, nil
-}
-
-// ModTime implements the os.FileInfo interface.
-func (f *file) ModTime() time.Time {
-	return f.t
-}
diff -pruN 2.31.2+ds1-1/pkg/pool/pool.go 2.33.5+ds1-2/pkg/pool/pool.go
--- 2.31.2+ds1-1/pkg/pool/pool.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/pool/pool.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,87 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pool
-
-import (
-	"fmt"
-	"reflect"
-	"sync"
-)
-
-// Pool is a bucketed pool for variably sized byte slices.
-type Pool struct {
-	buckets []sync.Pool
-	sizes   []int
-	// make is the function used to create an empty slice when none exist yet.
-	make func(int) interface{}
-}
-
-// New returns a new Pool with size buckets for minSize to maxSize
-// increasing by the given factor.
-func New(minSize, maxSize int, factor float64, makeFunc func(int) interface{}) *Pool {
-	if minSize < 1 {
-		panic("invalid minimum pool size")
-	}
-	if maxSize < 1 {
-		panic("invalid maximum pool size")
-	}
-	if factor < 1 {
-		panic("invalid factor")
-	}
-
-	var sizes []int
-
-	for s := minSize; s <= maxSize; s = int(float64(s) * factor) {
-		sizes = append(sizes, s)
-	}
-
-	p := &Pool{
-		buckets: make([]sync.Pool, len(sizes)),
-		sizes:   sizes,
-		make:    makeFunc,
-	}
-
-	return p
-}
-
-// Get returns a new byte slices that fits the given size.
-func (p *Pool) Get(sz int) interface{} {
-	for i, bktSize := range p.sizes {
-		if sz > bktSize {
-			continue
-		}
-		b := p.buckets[i].Get()
-		if b == nil {
-			b = p.make(bktSize)
-		}
-		return b
-	}
-	return p.make(sz)
-}
-
-// Put adds a slice to the right bucket in the pool.
-func (p *Pool) Put(s interface{}) {
-	slice := reflect.ValueOf(s)
-
-	if slice.Kind() != reflect.Slice {
-		panic(fmt.Sprintf("%+v is not a slice", slice))
-	}
-	for i, size := range p.sizes {
-		if slice.Cap() > size {
-			continue
-		}
-		p.buckets[i].Put(slice.Slice(0, 0).Interface())
-		return
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/pool/pool_test.go 2.33.5+ds1-2/pkg/pool/pool_test.go
--- 2.31.2+ds1-1/pkg/pool/pool_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/pool/pool_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,50 +0,0 @@
-// Copyright 2020 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package pool
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func makeFunc(size int) interface{} {
-	return make([]int, 0, size)
-}
-
-func TestPool(t *testing.T) {
-	testPool := New(1, 8, 2, makeFunc)
-	cases := []struct {
-		size        int
-		expectedCap int
-	}{
-		{
-			size:        -1,
-			expectedCap: 1,
-		},
-		{
-			size:        3,
-			expectedCap: 4,
-		},
-		{
-			size:        10,
-			expectedCap: 10,
-		},
-	}
-	for _, c := range cases {
-		ret := testPool.Get(c.size)
-		require.Equal(t, c.expectedCap, cap(ret.([]int)))
-		testPool.Put(ret)
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/README.md 2.33.5+ds1-2/pkg/README.md
--- 2.31.2+ds1-1/pkg/README.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/README.md	1970-01-01 00:00:00.000000000 +0000
@@ -1,3 +0,0 @@
-The `pkg` directory is deprecated.
-Please do not add new packages to this directory.
-Existing packages will be moved elsewhere eventually.
diff -pruN 2.31.2+ds1-1/pkg/relabel/relabel.go 2.33.5+ds1-2/pkg/relabel/relabel.go
--- 2.31.2+ds1-1/pkg/relabel/relabel.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/relabel/relabel.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,270 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package relabel
-
-import (
-	"crypto/md5"
-	"fmt"
-	"regexp"
-	"strings"
-
-	"github.com/pkg/errors"
-	"github.com/prometheus/common/model"
-
-	"github.com/prometheus/prometheus/pkg/labels"
-)
-
-var (
-	relabelTarget = regexp.MustCompile(`^(?:(?:[a-zA-Z_]|\$(?:\{\w+\}|\w+))+\w*)+$`)
-
-	DefaultRelabelConfig = Config{
-		Action:      Replace,
-		Separator:   ";",
-		Regex:       MustNewRegexp("(.*)"),
-		Replacement: "$1",
-	}
-)
-
-// Action is the action to be performed on relabeling.
-type Action string
-
-const (
-	// Replace performs a regex replacement.
-	Replace Action = "replace"
-	// Keep drops targets for which the input does not match the regex.
-	Keep Action = "keep"
-	// Drop drops targets for which the input does match the regex.
-	Drop Action = "drop"
-	// HashMod sets a label to the modulus of a hash of labels.
-	HashMod Action = "hashmod"
-	// LabelMap copies labels to other labelnames based on a regex.
-	LabelMap Action = "labelmap"
-	// LabelDrop drops any label matching the regex.
-	LabelDrop Action = "labeldrop"
-	// LabelKeep drops any label not matching the regex.
-	LabelKeep Action = "labelkeep"
-)
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (a *Action) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var s string
-	if err := unmarshal(&s); err != nil {
-		return err
-	}
-	switch act := Action(strings.ToLower(s)); act {
-	case Replace, Keep, Drop, HashMod, LabelMap, LabelDrop, LabelKeep:
-		*a = act
-		return nil
-	}
-	return errors.Errorf("unknown relabel action %q", s)
-}
-
-// Config is the configuration for relabeling of target label sets.
-type Config struct {
-	// A list of labels from which values are taken and concatenated
-	// with the configured separator in order.
-	SourceLabels model.LabelNames `yaml:"source_labels,flow,omitempty"`
-	// Separator is the string between concatenated values from the source labels.
-	Separator string `yaml:"separator,omitempty"`
-	// Regex against which the concatenation is matched.
-	Regex Regexp `yaml:"regex,omitempty"`
-	// Modulus to take of the hash of concatenated values from the source labels.
-	Modulus uint64 `yaml:"modulus,omitempty"`
-	// TargetLabel is the label to which the resulting string is written in a replacement.
-	// Regexp interpolation is allowed for the replace action.
-	TargetLabel string `yaml:"target_label,omitempty"`
-	// Replacement is the regex replacement pattern to be used.
-	Replacement string `yaml:"replacement,omitempty"`
-	// Action is the action to be performed for the relabeling.
-	Action Action `yaml:"action,omitempty"`
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (c *Config) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	*c = DefaultRelabelConfig
-	type plain Config
-	if err := unmarshal((*plain)(c)); err != nil {
-		return err
-	}
-	if c.Regex.Regexp == nil {
-		c.Regex = MustNewRegexp("")
-	}
-	if c.Action == "" {
-		return errors.Errorf("relabel action cannot be empty")
-	}
-	if c.Modulus == 0 && c.Action == HashMod {
-		return errors.Errorf("relabel configuration for hashmod requires non-zero modulus")
-	}
-	if (c.Action == Replace || c.Action == HashMod) && c.TargetLabel == "" {
-		return errors.Errorf("relabel configuration for %s action requires 'target_label' value", c.Action)
-	}
-	if c.Action == Replace && !relabelTarget.MatchString(c.TargetLabel) {
-		return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
-	}
-	if c.Action == LabelMap && !relabelTarget.MatchString(c.Replacement) {
-		return errors.Errorf("%q is invalid 'replacement' for %s action", c.Replacement, c.Action)
-	}
-	if c.Action == HashMod && !model.LabelName(c.TargetLabel).IsValid() {
-		return errors.Errorf("%q is invalid 'target_label' for %s action", c.TargetLabel, c.Action)
-	}
-
-	if c.Action == LabelDrop || c.Action == LabelKeep {
-		if c.SourceLabels != nil ||
-			c.TargetLabel != DefaultRelabelConfig.TargetLabel ||
-			c.Modulus != DefaultRelabelConfig.Modulus ||
-			c.Separator != DefaultRelabelConfig.Separator ||
-			c.Replacement != DefaultRelabelConfig.Replacement {
-			return errors.Errorf("%s action requires only 'regex', and no other fields", c.Action)
-		}
-	}
-
-	return nil
-}
-
-// Regexp encapsulates a regexp.Regexp and makes it YAML marshalable.
-type Regexp struct {
-	*regexp.Regexp
-	original string
-}
-
-// NewRegexp creates a new anchored Regexp and returns an error if the
-// passed-in regular expression does not compile.
-func NewRegexp(s string) (Regexp, error) {
-	regex, err := regexp.Compile("^(?:" + s + ")$")
-	return Regexp{
-		Regexp:   regex,
-		original: s,
-	}, err
-}
-
-// MustNewRegexp works like NewRegexp, but panics if the regular expression does not compile.
-func MustNewRegexp(s string) Regexp {
-	re, err := NewRegexp(s)
-	if err != nil {
-		panic(err)
-	}
-	return re
-}
-
-// UnmarshalYAML implements the yaml.Unmarshaler interface.
-func (re *Regexp) UnmarshalYAML(unmarshal func(interface{}) error) error {
-	var s string
-	if err := unmarshal(&s); err != nil {
-		return err
-	}
-	r, err := NewRegexp(s)
-	if err != nil {
-		return err
-	}
-	*re = r
-	return nil
-}
-
-// MarshalYAML implements the yaml.Marshaler interface.
-func (re Regexp) MarshalYAML() (interface{}, error) {
-	if re.original != "" {
-		return re.original, nil
-	}
-	return nil, nil
-}
-
-// Process returns a relabeled copy of the given label set. The relabel configurations
-// are applied in order of input.
-// If a label set is dropped, nil is returned.
-// May return the input labelSet modified.
-func Process(labels labels.Labels, cfgs ...*Config) labels.Labels {
-	for _, cfg := range cfgs {
-		labels = relabel(labels, cfg)
-		if labels == nil {
-			return nil
-		}
-	}
-	return labels
-}
-
-func relabel(lset labels.Labels, cfg *Config) labels.Labels {
-	values := make([]string, 0, len(cfg.SourceLabels))
-	for _, ln := range cfg.SourceLabels {
-		values = append(values, lset.Get(string(ln)))
-	}
-	val := strings.Join(values, cfg.Separator)
-
-	lb := labels.NewBuilder(lset)
-
-	switch cfg.Action {
-	case Drop:
-		if cfg.Regex.MatchString(val) {
-			return nil
-		}
-	case Keep:
-		if !cfg.Regex.MatchString(val) {
-			return nil
-		}
-	case Replace:
-		indexes := cfg.Regex.FindStringSubmatchIndex(val)
-		// If there is no match no replacement must take place.
-		if indexes == nil {
-			break
-		}
-		target := model.LabelName(cfg.Regex.ExpandString([]byte{}, cfg.TargetLabel, val, indexes))
-		if !target.IsValid() {
-			lb.Del(cfg.TargetLabel)
-			break
-		}
-		res := cfg.Regex.ExpandString([]byte{}, cfg.Replacement, val, indexes)
-		if len(res) == 0 {
-			lb.Del(cfg.TargetLabel)
-			break
-		}
-		lb.Set(string(target), string(res))
-	case HashMod:
-		mod := sum64(md5.Sum([]byte(val))) % cfg.Modulus
-		lb.Set(cfg.TargetLabel, fmt.Sprintf("%d", mod))
-	case LabelMap:
-		for _, l := range lset {
-			if cfg.Regex.MatchString(l.Name) {
-				res := cfg.Regex.ReplaceAllString(l.Name, cfg.Replacement)
-				lb.Set(res, l.Value)
-			}
-		}
-	case LabelDrop:
-		for _, l := range lset {
-			if cfg.Regex.MatchString(l.Name) {
-				lb.Del(l.Name)
-			}
-		}
-	case LabelKeep:
-		for _, l := range lset {
-			if !cfg.Regex.MatchString(l.Name) {
-				lb.Del(l.Name)
-			}
-		}
-	default:
-		panic(errors.Errorf("relabel: unknown relabel action type %q", cfg.Action))
-	}
-
-	return lb.Labels()
-}
-
-// sum64 sums the md5 hash to an uint64.
-func sum64(hash [md5.Size]byte) uint64 {
-	var s uint64
-
-	for i, b := range hash {
-		shift := uint64((md5.Size - i - 1) * 8)
-
-		s |= uint64(b) << shift
-	}
-	return s
-}
diff -pruN 2.31.2+ds1-1/pkg/relabel/relabel_test.go 2.33.5+ds1-2/pkg/relabel/relabel_test.go
--- 2.31.2+ds1-1/pkg/relabel/relabel_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/relabel/relabel_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,480 +0,0 @@
-// Copyright 2015 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package relabel
-
-import (
-	"testing"
-
-	"github.com/prometheus/common/model"
-	"github.com/stretchr/testify/require"
-
-	"github.com/prometheus/prometheus/pkg/labels"
-)
-
-func TestRelabel(t *testing.T) {
-	tests := []struct {
-		input   labels.Labels
-		relabel []*Config
-		output  labels.Labels
-	}{
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-				"b": "bar",
-				"c": "baz",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("f(.*)"),
-					TargetLabel:  "d",
-					Separator:    ";",
-					Replacement:  "ch${1}-ch${1}",
-					Action:       Replace,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "foo",
-				"b": "bar",
-				"c": "baz",
-				"d": "choo-choo",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-				"b": "bar",
-				"c": "baz",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a", "b"},
-					Regex:        MustNewRegexp("f(.*);(.*)r"),
-					TargetLabel:  "a",
-					Separator:    ";",
-					Replacement:  "b${1}${2}m", // boobam
-					Action:       Replace,
-				},
-				{
-					SourceLabels: model.LabelNames{"c", "a"},
-					Regex:        MustNewRegexp("(b).*b(.*)ba(.*)"),
-					TargetLabel:  "d",
-					Separator:    ";",
-					Replacement:  "$1$2$2$3",
-					Action:       Replace,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "boobam",
-				"b": "bar",
-				"c": "baz",
-				"d": "boooom",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp(".*o.*"),
-					Action:       Drop,
-				}, {
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("f(.*)"),
-					TargetLabel:  "d",
-					Separator:    ";",
-					Replacement:  "ch$1-ch$1",
-					Action:       Replace,
-				},
-			},
-			output: nil,
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-				"b": "bar",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp(".*o.*"),
-					Action:       Drop,
-				},
-			},
-			output: nil,
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "abc",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp(".*(b).*"),
-					TargetLabel:  "d",
-					Separator:    ";",
-					Replacement:  "$1",
-					Action:       Replace,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "abc",
-				"d": "b",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("no-match"),
-					Action:       Drop,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("f|o"),
-					Action:       Drop,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("no-match"),
-					Action:       Keep,
-				},
-			},
-			output: nil,
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("f.*"),
-					Action:       Keep,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-		},
-		{
-			// No replacement must be applied if there is no match.
-			input: labels.FromMap(map[string]string{
-				"a": "boo",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("f"),
-					TargetLabel:  "b",
-					Replacement:  "bar",
-					Action:       Replace,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "boo",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo",
-				"b": "bar",
-				"c": "baz",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"c"},
-					TargetLabel:  "d",
-					Separator:    ";",
-					Action:       HashMod,
-					Modulus:      1000,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "foo",
-				"b": "bar",
-				"c": "baz",
-				"d": "976",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a": "foo\nbar",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					TargetLabel:  "b",
-					Separator:    ";",
-					Action:       HashMod,
-					Modulus:      1000,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "foo\nbar",
-				"b": "734",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a":  "foo",
-				"b1": "bar",
-				"b2": "baz",
-			}),
-			relabel: []*Config{
-				{
-					Regex:       MustNewRegexp("(b.*)"),
-					Replacement: "bar_${1}",
-					Action:      LabelMap,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a":      "foo",
-				"b1":     "bar",
-				"b2":     "baz",
-				"bar_b1": "bar",
-				"bar_b2": "baz",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a":             "foo",
-				"__meta_my_bar": "aaa",
-				"__meta_my_baz": "bbb",
-				"__meta_other":  "ccc",
-			}),
-			relabel: []*Config{
-				{
-					Regex:       MustNewRegexp("__meta_(my.*)"),
-					Replacement: "${1}",
-					Action:      LabelMap,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a":             "foo",
-				"__meta_my_bar": "aaa",
-				"__meta_my_baz": "bbb",
-				"__meta_other":  "ccc",
-				"my_bar":        "aaa",
-				"my_baz":        "bbb",
-			}),
-		},
-		{ // valid case
-			input: labels.FromMap(map[string]string{
-				"a": "some-name-value",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
-					Action:       Replace,
-					Replacement:  "${2}",
-					TargetLabel:  "${1}",
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a":    "some-name-value",
-				"name": "value",
-			}),
-		},
-		{ // invalid replacement ""
-			input: labels.FromMap(map[string]string{
-				"a": "some-name-value",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
-					Action:       Replace,
-					Replacement:  "${3}",
-					TargetLabel:  "${1}",
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "some-name-value",
-			}),
-		},
-		{ // invalid target_labels
-			input: labels.FromMap(map[string]string{
-				"a": "some-name-value",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
-					Action:       Replace,
-					Replacement:  "${1}",
-					TargetLabel:  "${3}",
-				},
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
-					Action:       Replace,
-					Replacement:  "${1}",
-					TargetLabel:  "0${3}",
-				},
-				{
-					SourceLabels: model.LabelNames{"a"},
-					Regex:        MustNewRegexp("some-([^-]+)-([^,]+)"),
-					Action:       Replace,
-					Replacement:  "${1}",
-					TargetLabel:  "-${3}",
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "some-name-value",
-			}),
-		},
-		{ // more complex real-life like usecase
-			input: labels.FromMap(map[string]string{
-				"__meta_sd_tags": "path:/secret,job:some-job,label:foo=bar",
-			}),
-			relabel: []*Config{
-				{
-					SourceLabels: model.LabelNames{"__meta_sd_tags"},
-					Regex:        MustNewRegexp("(?:.+,|^)path:(/[^,]+).*"),
-					Action:       Replace,
-					Replacement:  "${1}",
-					TargetLabel:  "__metrics_path__",
-				},
-				{
-					SourceLabels: model.LabelNames{"__meta_sd_tags"},
-					Regex:        MustNewRegexp("(?:.+,|^)job:([^,]+).*"),
-					Action:       Replace,
-					Replacement:  "${1}",
-					TargetLabel:  "job",
-				},
-				{
-					SourceLabels: model.LabelNames{"__meta_sd_tags"},
-					Regex:        MustNewRegexp("(?:.+,|^)label:([^=]+)=([^,]+).*"),
-					Action:       Replace,
-					Replacement:  "${2}",
-					TargetLabel:  "${1}",
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"__meta_sd_tags":   "path:/secret,job:some-job,label:foo=bar",
-				"__metrics_path__": "/secret",
-				"job":              "some-job",
-				"foo":              "bar",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a":  "foo",
-				"b1": "bar",
-				"b2": "baz",
-			}),
-			relabel: []*Config{
-				{
-					Regex:  MustNewRegexp("(b.*)"),
-					Action: LabelKeep,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"b1": "bar",
-				"b2": "baz",
-			}),
-		},
-		{
-			input: labels.FromMap(map[string]string{
-				"a":  "foo",
-				"b1": "bar",
-				"b2": "baz",
-			}),
-			relabel: []*Config{
-				{
-					Regex:  MustNewRegexp("(b.*)"),
-					Action: LabelDrop,
-				},
-			},
-			output: labels.FromMap(map[string]string{
-				"a": "foo",
-			}),
-		},
-	}
-
-	for _, test := range tests {
-		// Setting default fields, mimicking the behaviour in Prometheus.
-		for _, cfg := range test.relabel {
-			if cfg.Action == "" {
-				cfg.Action = DefaultRelabelConfig.Action
-			}
-			if cfg.Separator == "" {
-				cfg.Separator = DefaultRelabelConfig.Separator
-			}
-			if cfg.Regex.original == "" {
-				cfg.Regex = DefaultRelabelConfig.Regex
-			}
-			if cfg.Replacement == "" {
-				cfg.Replacement = DefaultRelabelConfig.Replacement
-			}
-		}
-
-		res := Process(test.input, test.relabel...)
-		require.Equal(t, test.output, res)
-	}
-}
-
-func TestTargetLabelValidity(t *testing.T) {
-	tests := []struct {
-		str   string
-		valid bool
-	}{
-		{"-label", false},
-		{"label", true},
-		{"label${1}", true},
-		{"${1}label", true},
-		{"${1}", true},
-		{"${1}label", true},
-		{"${", false},
-		{"$", false},
-		{"${}", false},
-		{"foo${", false},
-		{"$1", true},
-		{"asd$2asd", true},
-		{"-foo${1}bar-", false},
-		{"_${1}_", true},
-		{"foo${bar}foo", true},
-	}
-	for _, test := range tests {
-		require.Equal(t, test.valid, relabelTarget.Match([]byte(test.str)),
-			"Expected %q to be %v", test.str, test.valid)
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/rulefmt.go 2.33.5+ds1-2/pkg/rulefmt/rulefmt.go
--- 2.31.2+ds1-1/pkg/rulefmt/rulefmt.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/rulefmt.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,305 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rulefmt
-
-import (
-	"bytes"
-	"context"
-	"io"
-	"io/ioutil"
-	"strings"
-	"time"
-
-	"github.com/pkg/errors"
-	"github.com/prometheus/common/model"
-	yaml "gopkg.in/yaml.v3"
-
-	"github.com/prometheus/prometheus/pkg/timestamp"
-	"github.com/prometheus/prometheus/promql/parser"
-	"github.com/prometheus/prometheus/template"
-)
-
-// Error represents semantic errors on parsing rule groups.
-type Error struct {
-	Group    string
-	Rule     int
-	RuleName string
-	Err      WrappedError
-}
-
-// WrappedError wraps error with the yaml node which can be used to represent
-// the line and column numbers of the error.
-type WrappedError struct {
-	err     error
-	node    *yaml.Node
-	nodeAlt *yaml.Node
-}
-
-func (err *Error) Error() string {
-	if err.Err.nodeAlt != nil {
-		return errors.Wrapf(err.Err.err, "%d:%d: %d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Err.nodeAlt.Line, err.Err.nodeAlt.Column, err.Group, err.Rule, err.RuleName).Error()
-	} else if err.Err.node != nil {
-		return errors.Wrapf(err.Err.err, "%d:%d: group %q, rule %d, %q", err.Err.node.Line, err.Err.node.Column, err.Group, err.Rule, err.RuleName).Error()
-	}
-	return errors.Wrapf(err.Err.err, "group %q, rule %d, %q", err.Group, err.Rule, err.RuleName).Error()
-}
-
-// RuleGroups is a set of rule groups that are typically exposed in a file.
-type RuleGroups struct {
-	Groups []RuleGroup `yaml:"groups"`
-}
-
-type ruleGroups struct {
-	Groups []yaml.Node `yaml:"groups"`
-}
-
-// Validate validates all rules in the rule groups.
-func (g *RuleGroups) Validate(node ruleGroups) (errs []error) {
-	set := map[string]struct{}{}
-
-	for j, g := range g.Groups {
-		if g.Name == "" {
-			errs = append(errs, errors.Errorf("%d:%d: Groupname must not be empty", node.Groups[j].Line, node.Groups[j].Column))
-		}
-
-		if _, ok := set[g.Name]; ok {
-			errs = append(
-				errs,
-				errors.Errorf("%d:%d: groupname: \"%s\" is repeated in the same file", node.Groups[j].Line, node.Groups[j].Column, g.Name),
-			)
-		}
-
-		set[g.Name] = struct{}{}
-
-		for i, r := range g.Rules {
-			for _, node := range g.Rules[i].Validate() {
-				var ruleName yaml.Node
-				if r.Alert.Value != "" {
-					ruleName = r.Alert
-				} else {
-					ruleName = r.Record
-				}
-				errs = append(errs, &Error{
-					Group:    g.Name,
-					Rule:     i + 1,
-					RuleName: ruleName.Value,
-					Err:      node,
-				})
-			}
-		}
-	}
-
-	return errs
-}
-
-// RuleGroup is a list of sequentially evaluated recording and alerting rules.
-type RuleGroup struct {
-	Name     string         `yaml:"name"`
-	Interval model.Duration `yaml:"interval,omitempty"`
-	Limit    int            `yaml:"limit,omitempty"`
-	Rules    []RuleNode     `yaml:"rules"`
-}
-
-// Rule describes an alerting or recording rule.
-type Rule struct {
-	Record      string            `yaml:"record,omitempty"`
-	Alert       string            `yaml:"alert,omitempty"`
-	Expr        string            `yaml:"expr"`
-	For         model.Duration    `yaml:"for,omitempty"`
-	Labels      map[string]string `yaml:"labels,omitempty"`
-	Annotations map[string]string `yaml:"annotations,omitempty"`
-}
-
-// RuleNode adds yaml.v3 layer to support line and column outputs for invalid rules.
-type RuleNode struct {
-	Record      yaml.Node         `yaml:"record,omitempty"`
-	Alert       yaml.Node         `yaml:"alert,omitempty"`
-	Expr        yaml.Node         `yaml:"expr"`
-	For         model.Duration    `yaml:"for,omitempty"`
-	Labels      map[string]string `yaml:"labels,omitempty"`
-	Annotations map[string]string `yaml:"annotations,omitempty"`
-}
-
-// Validate the rule and return a list of encountered errors.
-func (r *RuleNode) Validate() (nodes []WrappedError) {
-	if r.Record.Value != "" && r.Alert.Value != "" {
-		nodes = append(nodes, WrappedError{
-			err:     errors.Errorf("only one of 'record' and 'alert' must be set"),
-			node:    &r.Record,
-			nodeAlt: &r.Alert,
-		})
-	}
-	if r.Record.Value == "" && r.Alert.Value == "" {
-		if r.Record.Value == "0" {
-			nodes = append(nodes, WrappedError{
-				err:  errors.Errorf("one of 'record' or 'alert' must be set"),
-				node: &r.Alert,
-			})
-		} else {
-			nodes = append(nodes, WrappedError{
-				err:  errors.Errorf("one of 'record' or 'alert' must be set"),
-				node: &r.Record,
-			})
-		}
-	}
-
-	if r.Expr.Value == "" {
-		nodes = append(nodes, WrappedError{
-			err:  errors.Errorf("field 'expr' must be set in rule"),
-			node: &r.Expr,
-		})
-	} else if _, err := parser.ParseExpr(r.Expr.Value); err != nil {
-		nodes = append(nodes, WrappedError{
-			err:  errors.Wrapf(err, "could not parse expression"),
-			node: &r.Expr,
-		})
-	}
-	if r.Record.Value != "" {
-		if len(r.Annotations) > 0 {
-			nodes = append(nodes, WrappedError{
-				err:  errors.Errorf("invalid field 'annotations' in recording rule"),
-				node: &r.Record,
-			})
-		}
-		if r.For != 0 {
-			nodes = append(nodes, WrappedError{
-				err:  errors.Errorf("invalid field 'for' in recording rule"),
-				node: &r.Record,
-			})
-		}
-		if !model.IsValidMetricName(model.LabelValue(r.Record.Value)) {
-			nodes = append(nodes, WrappedError{
-				err:  errors.Errorf("invalid recording rule name: %s", r.Record.Value),
-				node: &r.Record,
-			})
-		}
-	}
-
-	for k, v := range r.Labels {
-		if !model.LabelName(k).IsValid() || k == model.MetricNameLabel {
-			nodes = append(nodes, WrappedError{
-				err: errors.Errorf("invalid label name: %s", k),
-			})
-		}
-
-		if !model.LabelValue(v).IsValid() {
-			nodes = append(nodes, WrappedError{
-				err: errors.Errorf("invalid label value: %s", v),
-			})
-		}
-	}
-
-	for k := range r.Annotations {
-		if !model.LabelName(k).IsValid() {
-			nodes = append(nodes, WrappedError{
-				err: errors.Errorf("invalid annotation name: %s", k),
-			})
-		}
-	}
-
-	for _, err := range testTemplateParsing(r) {
-		nodes = append(nodes, WrappedError{err: err})
-	}
-
-	return
-}
-
-// testTemplateParsing checks if the templates used in labels and annotations
-// of the alerting rules are parsed correctly.
-func testTemplateParsing(rl *RuleNode) (errs []error) {
-	if rl.Alert.Value == "" {
-		// Not an alerting rule.
-		return errs
-	}
-
-	// Trying to parse templates.
-	tmplData := template.AlertTemplateData(map[string]string{}, map[string]string{}, "", 0)
-	defs := []string{
-		"{{$labels := .Labels}}",
-		"{{$externalLabels := .ExternalLabels}}",
-		"{{$externalURL := .ExternalURL}}",
-		"{{$value := .Value}}",
-	}
-	parseTest := func(text string) error {
-		tmpl := template.NewTemplateExpander(
-			context.TODO(),
-			strings.Join(append(defs, text), ""),
-			"__alert_"+rl.Alert.Value,
-			tmplData,
-			model.Time(timestamp.FromTime(time.Now())),
-			nil,
-			nil,
-			nil,
-		)
-		return tmpl.ParseTest()
-	}
-
-	// Parsing Labels.
-	for k, val := range rl.Labels {
-		err := parseTest(val)
-		if err != nil {
-			errs = append(errs, errors.Wrapf(err, "label %q", k))
-		}
-	}
-
-	// Parsing Annotations.
-	for k, val := range rl.Annotations {
-		err := parseTest(val)
-		if err != nil {
-			errs = append(errs, errors.Wrapf(err, "annotation %q", k))
-		}
-	}
-
-	return errs
-}
-
-// Parse parses and validates a set of rules.
-func Parse(content []byte) (*RuleGroups, []error) {
-	var (
-		groups RuleGroups
-		node   ruleGroups
-		errs   []error
-	)
-
-	decoder := yaml.NewDecoder(bytes.NewReader(content))
-	decoder.KnownFields(true)
-	err := decoder.Decode(&groups)
-	// Ignore io.EOF which happens with empty input.
-	if err != nil && err != io.EOF {
-		errs = append(errs, err)
-	}
-	err = yaml.Unmarshal(content, &node)
-	if err != nil {
-		errs = append(errs, err)
-	}
-
-	if len(errs) > 0 {
-		return nil, errs
-	}
-
-	return &groups, groups.Validate(node)
-}
-
-// ParseFile reads and parses rules from a file.
-func ParseFile(file string) (*RuleGroups, []error) {
-	b, err := ioutil.ReadFile(file)
-	if err != nil {
-		return nil, []error{errors.Wrap(err, file)}
-	}
-	rgs, errs := Parse(b)
-	for i := range errs {
-		errs[i] = errors.Wrap(errs[i], file)
-	}
-	return rgs, errs
-}
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/rulefmt_test.go 2.33.5+ds1-2/pkg/rulefmt/rulefmt_test.go
--- 2.31.2+ds1-1/pkg/rulefmt/rulefmt_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/rulefmt_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,186 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package rulefmt
-
-import (
-	"path/filepath"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-)
-
-func TestParseFileSuccess(t *testing.T) {
-	_, errs := ParseFile("testdata/test.yaml")
-	require.Empty(t, errs, "unexpected errors parsing file")
-}
-
-func TestParseFileFailure(t *testing.T) {
-	table := []struct {
-		filename string
-		errMsg   string
-	}{
-		{
-			filename: "duplicate_grp.bad.yaml",
-			errMsg:   "groupname: \"yolo\" is repeated in the same file",
-		},
-		{
-			filename: "bad_expr.bad.yaml",
-			errMsg:   "parse error",
-		},
-		{
-			filename: "record_and_alert.bad.yaml",
-			errMsg:   "only one of 'record' and 'alert' must be set",
-		},
-		{
-			filename: "no_rec_alert.bad.yaml",
-			errMsg:   "one of 'record' or 'alert' must be set",
-		},
-		{
-			filename: "noexpr.bad.yaml",
-			errMsg:   "field 'expr' must be set in rule",
-		},
-		{
-			filename: "bad_lname.bad.yaml",
-			errMsg:   "invalid label name",
-		},
-		{
-			filename: "bad_annotation.bad.yaml",
-			errMsg:   "invalid annotation name",
-		},
-		{
-			filename: "invalid_record_name.bad.yaml",
-			errMsg:   "invalid recording rule name",
-		},
-		{
-			filename: "bad_field.bad.yaml",
-			errMsg:   "field annotation not found",
-		},
-		{
-			filename: "invalid_label_name.bad.yaml",
-			errMsg:   "invalid label name",
-		},
-	}
-
-	for _, c := range table {
-		_, errs := ParseFile(filepath.Join("testdata", c.filename))
-		require.NotNil(t, errs, "Expected error parsing %s but got none", c.filename)
-		require.Error(t, errs[0], c.errMsg, "Expected error for %s.", c.filename)
-	}
-}
-
-func TestTemplateParsing(t *testing.T) {
-	tests := []struct {
-		ruleString string
-		shouldPass bool
-	}{
-		{
-			ruleString: `
-groups:
-- name: example
-  rules:
-  - alert: InstanceDown
-    expr: up == 0
-    for: 5m
-    labels:
-      severity: "page"
-    annotations:
-      summary: "Instance {{ $labels.instance }} down"
-`,
-			shouldPass: true,
-		},
-		{
-			// `$label` instead of `$labels`.
-			ruleString: `
-groups:
-- name: example
-  rules:
-  - alert: InstanceDown
-    expr: up == 0
-    for: 5m
-    labels:
-      severity: "page"
-    annotations:
-      summary: "Instance {{ $label.instance }} down"
-`,
-			shouldPass: false,
-		},
-		{
-			// `$this_is_wrong`.
-			ruleString: `
-groups:
-- name: example
-  rules:
-  - alert: InstanceDown
-    expr: up == 0
-    for: 5m
-    labels:
-      severity: "{{$this_is_wrong}}"
-    annotations:
-      summary: "Instance {{ $labels.instance }} down"
-`,
-			shouldPass: false,
-		},
-		{
-			// `$labels.quantile * 100`.
-			ruleString: `
-groups:
-- name: example
-  rules:
-  - alert: InstanceDown
-    expr: up == 0
-    for: 5m
-    labels:
-      severity: "page"
-    annotations:
-      summary: "Instance {{ $labels.instance }} down"
-      description: "{{$labels.quantile * 100}}"
-`,
-			shouldPass: false,
-		},
-	}
-
-	for _, tst := range tests {
-		rgs, errs := Parse([]byte(tst.ruleString))
-		require.NotNil(t, rgs, "Rule parsing, rule=\n"+tst.ruleString)
-		passed := (tst.shouldPass && len(errs) == 0) || (!tst.shouldPass && len(errs) > 0)
-		require.True(t, passed, "Rule validation failed, rule=\n"+tst.ruleString)
-	}
-}
-
-func TestUniqueErrorNodes(t *testing.T) {
-	group := `
-groups:
-- name: example
-  rules:
-  - alert: InstanceDown
-    expr: up ===== 0
-    for: 5m
-    labels:
-      severity: "page"
-    annotations:
-      summary: "Instance {{ $labels.instance }} down"
-  - alert: InstanceUp
-    expr: up ===== 1
-    for: 5m
-    labels:
-      severity: "page"
-    annotations:
-      summary: "Instance {{ $labels.instance }} up"
-`
-	_, errs := Parse([]byte(group))
-	require.Len(t, errs, 2, "Expected two errors")
-	err0 := errs[0].(*Error).Err.node
-	err1 := errs[1].(*Error).Err.node
-	require.NotEqual(t, err0, err1, "Error nodes should not be the same")
-}
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/bad_annotation.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/bad_annotation.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/bad_annotation.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/bad_annotation.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - alert: hola
-        expr: 1
-        annotations:
-          ins-tance: localhost
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/bad_expr.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/bad_expr.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/bad_expr.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/bad_expr.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,5 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - record: yolo
-        expr: rate(hi)
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/bad_field.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/bad_field.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/bad_field.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/bad_field.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,9 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - alert: hola
-        expr: 1
-        labels:
-          instance: localhost
-        annotation:
-          summary: annonations is written without s above
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/bad_lname.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/bad_lname.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/bad_lname.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/bad_lname.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - record: hola
-        expr: 1
-        labels:
-          ins-tance: localhost
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/duplicate_grp.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/duplicate_grp.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/duplicate_grp.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/duplicate_grp.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,3 +0,0 @@
-groups:
-  - name: yolo
-  - name: yolo
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/invalid_label_name.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/invalid_label_name.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/invalid_label_name.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/invalid_label_name.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,7 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - record: hola
-        expr: 1
-        labels:
-          __name__: anything
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/invalid_record_name.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/invalid_record_name.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/invalid_record_name.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/invalid_record_name.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,5 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - record: strawberry{flavor="sweet"}
-        expr: 1
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/noexpr.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/noexpr.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/noexpr.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/noexpr.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,4 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - record: ylo
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/no_rec_alert.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/no_rec_alert.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/no_rec_alert.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/no_rec_alert.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,4 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - expr: 1
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/record_and_alert.bad.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/record_and_alert.bad.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/record_and_alert.bad.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/record_and_alert.bad.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,6 +0,0 @@
-groups:
-  - name: yolo
-    rules:
-      - record: Hi
-        alert: Hello
-        expr: 1
diff -pruN 2.31.2+ds1-1/pkg/rulefmt/testdata/test.yaml 2.33.5+ds1-2/pkg/rulefmt/testdata/test.yaml
--- 2.31.2+ds1-1/pkg/rulefmt/testdata/test.yaml	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/rulefmt/testdata/test.yaml	1970-01-01 00:00:00.000000000 +0000
@@ -1,64 +0,0 @@
-groups:
-  - name: my-group-name
-    interval: 30s # defaults to global interval
-    rules:
-      - alert: HighErrors
-        expr: |
-          sum without(instance) (rate(errors_total[5m]))
-          /
-          sum without(instance) (rate(requests_total[5m]))
-        for: 5m
-        labels:
-          severity: critical
-        annotations:
-          description: "stuff's happening with {{ $.labels.service }}"
-
-      # Mix recording rules in the same list
-      - record: "new_metric"
-        expr: |
-          sum without(instance) (rate(errors_total[5m]))
-          /
-          sum without(instance) (rate(requests_total[5m]))
-        labels:
-          abc: edf
-          uvw: xyz
-
-      - alert: HighErrors
-        expr: |
-          sum without(instance) (rate(errors_total[5m]))
-          /
-          sum without(instance) (rate(requests_total[5m]))
-        for: 5m
-        labels:
-          severity: critical
-        annotations:
-          description: "stuff's happening with {{ $.labels.service }}"
-
-  - name: my-another-name
-    interval: 30s # defaults to global interval
-    rules:
-      - alert: HighErrors
-        expr: |
-          sum without(instance) (rate(errors_total[5m]))
-          /
-          sum without(instance) (rate(requests_total[5m]))
-        for: 5m
-        labels:
-          severity: critical
-
-      - record: "new_metric"
-        expr: |
-          sum without(instance) (rate(errors_total[5m]))
-          /
-          sum without(instance) (rate(requests_total[5m]))
-
-      - alert: HighErrors
-        expr: |
-          sum without(instance) (rate(errors_total[5m]))
-          /
-          sum without(instance) (rate(requests_total[5m]))
-        for: 5m
-        labels:
-          severity: critical
-        annotations:
-          description: "stuff's happening with {{ $.labels.service }}"
diff -pruN 2.31.2+ds1-1/pkg/runtime/limits_default.go 2.33.5+ds1-2/pkg/runtime/limits_default.go
--- 2.31.2+ds1-1/pkg/runtime/limits_default.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/limits_default.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,48 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows
-// +build !windows
-
-package runtime
-
-import (
-	"fmt"
-	"syscall"
-)
-
-// syscall.RLIM_INFINITY is a constant and its default type is int.
-// It needs to be converted to an int64 variable to be compared with uint64 values.
-// See https://golang.org/ref/spec#Conversions
-var unlimited int64 = syscall.RLIM_INFINITY
-
-func limitToString(v uint64, unit string) string {
-	if v == uint64(unlimited) {
-		return "unlimited"
-	}
-	return fmt.Sprintf("%d%s", v, unit)
-}
-
-func getLimits(resource int, unit string) string {
-	rlimit := syscall.Rlimit{}
-	err := syscall.Getrlimit(resource, &rlimit)
-	if err != nil {
-		panic("syscall.Getrlimit failed: " + err.Error())
-	}
-	return fmt.Sprintf("(soft=%s, hard=%s)", limitToString(uint64(rlimit.Cur), unit), limitToString(uint64(rlimit.Max), unit))
-}
-
-// FdLimits returns the soft and hard limits for file descriptors.
-func FdLimits() string {
-	return getLimits(syscall.RLIMIT_NOFILE, "")
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/limits_windows.go 2.33.5+ds1-2/pkg/runtime/limits_windows.go
--- 2.31.2+ds1-1/pkg/runtime/limits_windows.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/limits_windows.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,27 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build windows
-// +build windows
-
-package runtime
-
-// FdLimits not supported on Windows
-func FdLimits() string {
-	return "N/A"
-}
-
-// VMLimits not supported on Windows
-func VMLimits() string {
-	return "N/A"
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/statfs_default.go 2.33.5+ds1-2/pkg/runtime/statfs_default.go
--- 2.31.2+ds1-1/pkg/runtime/statfs_default.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/statfs_default.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,83 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows && !openbsd && !netbsd && !solaris && !386
-// +build !windows,!openbsd,!netbsd,!solaris,!386
-
-package runtime
-
-import (
-	"strconv"
-	"syscall"
-)
-
-// Statfs returns the file system type (Unix only)
-func Statfs(path string) string {
-
-	// Types of file systems that may be returned by `statfs`
-	fsTypes := map[int64]string{
-		0xadf5:     "ADFS_SUPER_MAGIC",
-		0xADFF:     "AFFS_SUPER_MAGIC",
-		0x42465331: "BEFS_SUPER_MAGIC",
-		0x1BADFACE: "BFS_MAGIC",
-		0xFF534D42: "CIFS_MAGIC_NUMBER",
-		0x73757245: "CODA_SUPER_MAGIC",
-		0x012FF7B7: "COH_SUPER_MAGIC",
-		0x28cd3d45: "CRAMFS_MAGIC",
-		0x1373:     "DEVFS_SUPER_MAGIC",
-		0x00414A53: "EFS_SUPER_MAGIC",
-		0x137D:     "EXT_SUPER_MAGIC",
-		0xEF51:     "EXT2_OLD_SUPER_MAGIC",
-		0xEF53:     "EXT4_SUPER_MAGIC",
-		0x4244:     "HFS_SUPER_MAGIC",
-		0xF995E849: "HPFS_SUPER_MAGIC",
-		0x958458f6: "HUGETLBFS_MAGIC",
-		0x9660:     "ISOFS_SUPER_MAGIC",
-		0x72b6:     "JFFS2_SUPER_MAGIC",
-		0x3153464a: "JFS_SUPER_MAGIC",
-		0x137F:     "MINIX_SUPER_MAGIC",
-		0x138F:     "MINIX_SUPER_MAGIC2",
-		0x2468:     "MINIX2_SUPER_MAGIC",
-		0x2478:     "MINIX2_SUPER_MAGIC2",
-		0x4d44:     "MSDOS_SUPER_MAGIC",
-		0x564c:     "NCP_SUPER_MAGIC",
-		0x6969:     "NFS_SUPER_MAGIC",
-		0x5346544e: "NTFS_SB_MAGIC",
-		0x9fa1:     "OPENPROM_SUPER_MAGIC",
-		0x9fa0:     "PROC_SUPER_MAGIC",
-		0x002f:     "QNX4_SUPER_MAGIC",
-		0x52654973: "REISERFS_SUPER_MAGIC",
-		0x7275:     "ROMFS_MAGIC",
-		0x517B:     "SMB_SUPER_MAGIC",
-		0x012FF7B6: "SYSV2_SUPER_MAGIC",
-		0x012FF7B5: "SYSV4_SUPER_MAGIC",
-		0x01021994: "TMPFS_MAGIC",
-		0x15013346: "UDF_SUPER_MAGIC",
-		0x00011954: "UFS_MAGIC",
-		0x9fa2:     "USBDEVICE_SUPER_MAGIC",
-		0xa501FCF5: "VXFS_SUPER_MAGIC",
-		0x012FF7B4: "XENIX_SUPER_MAGIC",
-		0x58465342: "XFS_SUPER_MAGIC",
-		0x012FD16D: "_XIAFS_SUPER_MAGIC",
-	}
-
-	var fs syscall.Statfs_t
-	err := syscall.Statfs(path, &fs)
-	if err != nil {
-		return strconv.FormatInt(int64(fs.Type), 16)
-	}
-	if fsType, ok := fsTypes[int64(fs.Type)]; ok {
-		return fsType
-	}
-	return strconv.FormatInt(int64(fs.Type), 16)
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/statfs.go 2.33.5+ds1-2/pkg/runtime/statfs.go
--- 2.31.2+ds1-1/pkg/runtime/statfs.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/statfs.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,23 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build openbsd || windows || netbsd || solaris
-// +build openbsd windows netbsd solaris
-
-package runtime
-
-// Statfs returns the file system type (Unix only)
-// syscall.Statfs_t isn't available on openbsd
-func Statfs(path string) string {
-	return "unknown"
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/statfs_linux_386.go 2.33.5+ds1-2/pkg/runtime/statfs_linux_386.go
--- 2.31.2+ds1-1/pkg/runtime/statfs_linux_386.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/statfs_linux_386.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,79 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build linux && 386
-// +build linux,386
-
-package runtime
-
-import (
-	"strconv"
-	"syscall"
-)
-
-// Statfs returns the file system type (Unix only)
-func Statfs(path string) string {
-
-	// Types of file systems that may be returned by `statfs`
-	fsTypes := map[int32]string{
-		0xadf5:     "ADFS_SUPER_MAGIC",
-		0xADFF:     "AFFS_SUPER_MAGIC",
-		0x42465331: "BEFS_SUPER_MAGIC",
-		0x1BADFACE: "BFS_MAGIC",
-		0x73757245: "CODA_SUPER_MAGIC",
-		0x012FF7B7: "COH_SUPER_MAGIC",
-		0x28cd3d45: "CRAMFS_MAGIC",
-		0x1373:     "DEVFS_SUPER_MAGIC",
-		0x00414A53: "EFS_SUPER_MAGIC",
-		0x137D:     "EXT_SUPER_MAGIC",
-		0xEF51:     "EXT2_OLD_SUPER_MAGIC",
-		0xEF53:     "EXT4_SUPER_MAGIC",
-		0x4244:     "HFS_SUPER_MAGIC",
-		0x9660:     "ISOFS_SUPER_MAGIC",
-		0x72b6:     "JFFS2_SUPER_MAGIC",
-		0x3153464a: "JFS_SUPER_MAGIC",
-		0x137F:     "MINIX_SUPER_MAGIC",
-		0x138F:     "MINIX_SUPER_MAGIC2",
-		0x2468:     "MINIX2_SUPER_MAGIC",
-		0x2478:     "MINIX2_SUPER_MAGIC2",
-		0x4d44:     "MSDOS_SUPER_MAGIC",
-		0x564c:     "NCP_SUPER_MAGIC",
-		0x6969:     "NFS_SUPER_MAGIC",
-		0x5346544e: "NTFS_SB_MAGIC",
-		0x9fa1:     "OPENPROM_SUPER_MAGIC",
-		0x9fa0:     "PROC_SUPER_MAGIC",
-		0x002f:     "QNX4_SUPER_MAGIC",
-		0x52654973: "REISERFS_SUPER_MAGIC",
-		0x7275:     "ROMFS_MAGIC",
-		0x517B:     "SMB_SUPER_MAGIC",
-		0x012FF7B6: "SYSV2_SUPER_MAGIC",
-		0x012FF7B5: "SYSV4_SUPER_MAGIC",
-		0x01021994: "TMPFS_MAGIC",
-		0x15013346: "UDF_SUPER_MAGIC",
-		0x00011954: "UFS_MAGIC",
-		0x9fa2:     "USBDEVICE_SUPER_MAGIC",
-		0x012FF7B4: "XENIX_SUPER_MAGIC",
-		0x58465342: "XFS_SUPER_MAGIC",
-		0x012FD16D: "_XIAFS_SUPER_MAGIC",
-	}
-
-	var fs syscall.Statfs_t
-	err := syscall.Statfs(path, &fs)
-	if err != nil {
-		return strconv.Itoa(int(fs.Type))
-	}
-	if fsType, ok := fsTypes[fs.Type]; ok {
-		return fsType
-	}
-	return strconv.Itoa(int(fs.Type))
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/statfs_uint32.go 2.33.5+ds1-2/pkg/runtime/statfs_uint32.go
--- 2.31.2+ds1-1/pkg/runtime/statfs_uint32.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/statfs_uint32.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,79 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build (386 && darwin) || (386 && freebsd)
-// +build 386,darwin 386,freebsd
-
-package runtime
-
-import (
-	"strconv"
-	"syscall"
-)
-
-// Statfs returns the file system type (Unix only)
-func Statfs(path string) string {
-
-	// Types of file systems that may be returned by `statfs`
-	fsTypes := map[uint32]string{
-		0xadf5:     "ADFS_SUPER_MAGIC",
-		0xADFF:     "AFFS_SUPER_MAGIC",
-		0x42465331: "BEFS_SUPER_MAGIC",
-		0x1BADFACE: "BFS_MAGIC",
-		0x73757245: "CODA_SUPER_MAGIC",
-		0x012FF7B7: "COH_SUPER_MAGIC",
-		0x28cd3d45: "CRAMFS_MAGIC",
-		0x1373:     "DEVFS_SUPER_MAGIC",
-		0x00414A53: "EFS_SUPER_MAGIC",
-		0x137D:     "EXT_SUPER_MAGIC",
-		0xEF51:     "EXT2_OLD_SUPER_MAGIC",
-		0xEF53:     "EXT4_SUPER_MAGIC",
-		0x4244:     "HFS_SUPER_MAGIC",
-		0x9660:     "ISOFS_SUPER_MAGIC",
-		0x72b6:     "JFFS2_SUPER_MAGIC",
-		0x3153464a: "JFS_SUPER_MAGIC",
-		0x137F:     "MINIX_SUPER_MAGIC",
-		0x138F:     "MINIX_SUPER_MAGIC2",
-		0x2468:     "MINIX2_SUPER_MAGIC",
-		0x2478:     "MINIX2_SUPER_MAGIC2",
-		0x4d44:     "MSDOS_SUPER_MAGIC",
-		0x564c:     "NCP_SUPER_MAGIC",
-		0x6969:     "NFS_SUPER_MAGIC",
-		0x5346544e: "NTFS_SB_MAGIC",
-		0x9fa1:     "OPENPROM_SUPER_MAGIC",
-		0x9fa0:     "PROC_SUPER_MAGIC",
-		0x002f:     "QNX4_SUPER_MAGIC",
-		0x52654973: "REISERFS_SUPER_MAGIC",
-		0x7275:     "ROMFS_MAGIC",
-		0x517B:     "SMB_SUPER_MAGIC",
-		0x012FF7B6: "SYSV2_SUPER_MAGIC",
-		0x012FF7B5: "SYSV4_SUPER_MAGIC",
-		0x01021994: "TMPFS_MAGIC",
-		0x15013346: "UDF_SUPER_MAGIC",
-		0x00011954: "UFS_MAGIC",
-		0x9fa2:     "USBDEVICE_SUPER_MAGIC",
-		0x012FF7B4: "XENIX_SUPER_MAGIC",
-		0x58465342: "XFS_SUPER_MAGIC",
-		0x012FD16D: "_XIAFS_SUPER_MAGIC",
-	}
-
-	var fs syscall.Statfs_t
-	err := syscall.Statfs(path, &fs)
-	if err != nil {
-		return strconv.Itoa(int(fs.Type))
-	}
-	if fsType, ok := fsTypes[fs.Type]; ok {
-		return fsType
-	}
-	return strconv.Itoa(int(fs.Type))
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/uname_default.go 2.33.5+ds1-2/pkg/runtime/uname_default.go
--- 2.31.2+ds1-1/pkg/runtime/uname_default.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/uname_default.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,24 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !linux
-// +build !linux
-
-package runtime
-
-import "runtime"
-
-// Uname for any platform other than linux.
-func Uname() string {
-	return "(" + runtime.GOOS + ")"
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/uname_linux.go 2.33.5+ds1-2/pkg/runtime/uname_linux.go
--- 2.31.2+ds1-1/pkg/runtime/uname_linux.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/uname_linux.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,37 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package runtime
-
-import (
-	"bytes"
-
-	"golang.org/x/sys/unix"
-)
-
-// Uname returns the uname of the host machine.
-func Uname() string {
-	buf := unix.Utsname{}
-	err := unix.Uname(&buf)
-	if err != nil {
-		panic("unix.Uname failed: " + err.Error())
-	}
-
-	str := "(" + string(buf.Sysname[:bytes.IndexByte(buf.Sysname[:], 0)])
-	str += " " + string(buf.Release[:bytes.IndexByte(buf.Release[:], 0)])
-	str += " " + string(buf.Version[:bytes.IndexByte(buf.Version[:], 0)])
-	str += " " + string(buf.Machine[:bytes.IndexByte(buf.Machine[:], 0)])
-	str += " " + string(buf.Nodename[:bytes.IndexByte(buf.Nodename[:], 0)])
-	str += " " + string(buf.Domainname[:bytes.IndexByte(buf.Domainname[:], 0)]) + ")"
-	return str
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/vmlimits_default.go 2.33.5+ds1-2/pkg/runtime/vmlimits_default.go
--- 2.31.2+ds1-1/pkg/runtime/vmlimits_default.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/vmlimits_default.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build !windows && !openbsd
-// +build !windows,!openbsd
-
-package runtime
-
-import (
-	"syscall"
-)
-
-// VMLimits returns the soft and hard limits for virtual memory.
-func VMLimits() string {
-	return getLimits(syscall.RLIMIT_AS, "b")
-}
diff -pruN 2.31.2+ds1-1/pkg/runtime/vmlimits_openbsd.go 2.33.5+ds1-2/pkg/runtime/vmlimits_openbsd.go
--- 2.31.2+ds1-1/pkg/runtime/vmlimits_openbsd.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/runtime/vmlimits_openbsd.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,26 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:build openbsd
-// +build openbsd
-
-package runtime
-
-import (
-	"syscall"
-)
-
-// VMLimits returns the soft and hard limits for virtual memory.
-func VMLimits() string {
-	return getLimits(syscall.RLIMIT_DATA, "b")
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/interface.go 2.33.5+ds1-2/pkg/textparse/interface.go
--- 2.31.2+ds1-1/pkg/textparse/interface.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/interface.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,96 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package textparse
-
-import (
-	"mime"
-
-	"github.com/prometheus/prometheus/pkg/exemplar"
-	"github.com/prometheus/prometheus/pkg/labels"
-)
-
-// Parser parses samples from a byte slice of samples in the official
-// Prometheus and OpenMetrics text exposition formats.
-type Parser interface {
-	// Series returns the bytes of the series, the timestamp if set, and the value
-	// of the current sample.
-	Series() ([]byte, *int64, float64)
-
-	// Help returns the metric name and help text in the current entry.
-	// Must only be called after Next returned a help entry.
-	// The returned byte slices become invalid after the next call to Next.
-	Help() ([]byte, []byte)
-
-	// Type returns the metric name and type in the current entry.
-	// Must only be called after Next returned a type entry.
-	// The returned byte slices become invalid after the next call to Next.
-	Type() ([]byte, MetricType)
-
-	// Unit returns the metric name and unit in the current entry.
-	// Must only be called after Next returned a unit entry.
-	// The returned byte slices become invalid after the next call to Next.
-	Unit() ([]byte, []byte)
-
-	// Comment returns the text of the current comment.
-	// Must only be called after Next returned a comment entry.
-	// The returned byte slice becomes invalid after the next call to Next.
-	Comment() []byte
-
-	// Metric writes the labels of the current sample into the passed labels.
-	// It returns the string from which the metric was parsed.
-	Metric(l *labels.Labels) string
-
-	// Exemplar writes the exemplar of the current sample into the passed
-	// exemplar. It returns if an exemplar exists or not.
-	Exemplar(l *exemplar.Exemplar) bool
-
-	// Next advances the parser to the next sample. It returns false if no
-	// more samples were read or an error occurred.
-	Next() (Entry, error)
-}
-
-// New returns a new parser of the byte slice.
-func New(b []byte, contentType string) Parser {
-	mediaType, _, err := mime.ParseMediaType(contentType)
-	if err == nil && mediaType == "application/openmetrics-text" {
-		return NewOpenMetricsParser(b)
-	}
-	return NewPromParser(b)
-}
-
-// Entry represents the type of a parsed entry.
-type Entry int
-
-const (
-	EntryInvalid Entry = -1
-	EntryType    Entry = 0
-	EntryHelp    Entry = 1
-	EntrySeries  Entry = 2
-	EntryComment Entry = 3
-	EntryUnit    Entry = 4
-)
-
-// MetricType represents metric type values.
-type MetricType string
-
-const (
-	MetricTypeCounter        = MetricType("counter")
-	MetricTypeGauge          = MetricType("gauge")
-	MetricTypeHistogram      = MetricType("histogram")
-	MetricTypeGaugeHistogram = MetricType("gaugehistogram")
-	MetricTypeSummary        = MetricType("summary")
-	MetricTypeInfo           = MetricType("info")
-	MetricTypeStateset       = MetricType("stateset")
-	MetricTypeUnknown        = MetricType("unknown")
-)
diff -pruN 2.31.2+ds1-1/pkg/textparse/openmetricslex.l 2.33.5+ds1-2/pkg/textparse/openmetricslex.l
--- 2.31.2+ds1-1/pkg/textparse/openmetricslex.l	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/openmetricslex.l	1970-01-01 00:00:00.000000000 +0000
@@ -1,80 +0,0 @@
-%{
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package textparse
-
-import (
-    "fmt"
-)
-
-// Lex is called by the parser generated by "go tool yacc" to obtain each
-// token. The method is opened before the matching rules block and closed at
-// the end of the file.
-func (l *openMetricsLexer) Lex() token {
-    if l.i >= len(l.b) {
-        return tEOF
-    }
-    c := l.b[l.i]
-    l.start = l.i
-
-%}
-
-D     [0-9]
-L     [a-zA-Z_]
-M     [a-zA-Z_:]
-C     [^\n]
-S     [ ]
-
-%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp sExemplar sEValue sETimestamp
-
-%yyc c
-%yyn c = l.next()
-%yyt l.state
-
-
-%%
-
-#{S}                                  l.state = sComment
-<sComment>HELP{S}                     l.state = sMeta1; return tHelp
-<sComment>TYPE{S}                     l.state = sMeta1; return tType
-<sComment>UNIT{S}                     l.state = sMeta1; return tUnit
-<sComment>"EOF"\n?                    l.state = sInit; return tEOFWord
-<sMeta1>{M}({M}|{D})*                 l.state = sMeta2; return tMName
-<sMeta2>{S}{C}*\n                     l.state = sInit; return tText
-
-{M}({M}|{D})*                         l.state = sValue; return tMName
-<sValue>\{                            l.state = sLabels; return tBraceOpen
-<sLabels>{L}({L}|{D})*                return tLName
-<sLabels>\}                           l.state = sValue; return tBraceClose
-<sLabels>=                            l.state = sLValue; return tEqual
-<sLabels>,                            return tComma
-<sLValue>\"(\\.|[^\\"\n])*\"          l.state = sLabels; return tLValue
-<sValue>{S}[^ \n]+                    l.state = sTimestamp; return tValue
-<sTimestamp>{S}[^ \n]+                return tTimestamp
-<sTimestamp>\n                        l.state = sInit; return tLinebreak
-<sTimestamp>{S}#{S}\{                 l.state = sExemplar; return tComment
-
-<sExemplar>{L}({L}|{D})*              return tLName
-<sExemplar>\}                         l.state = sEValue; return tBraceClose
-<sExemplar>=                          l.state = sEValue; return tEqual
-<sEValue>\"(\\.|[^\\"\n])*\"          l.state = sExemplar; return tLValue
-<sExemplar>,                          return tComma
-<sEValue>{S}[^ \n]+                   l.state = sETimestamp; return tValue
-<sETimestamp>{S}[^ \n]+               return tTimestamp
-<sETimestamp>\n                       l.state = sInit; return tLinebreak
-
-%%
-
-    return tInvalid
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/openmetricslex.l.go 2.33.5+ds1-2/pkg/textparse/openmetricslex.l.go
--- 2.31.2+ds1-1/pkg/textparse/openmetricslex.l.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/openmetricslex.l.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,762 +0,0 @@
-// Code generated by golex. DO NOT EDIT.
-
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package textparse
-
-import (
-	"fmt"
-)
-
-// Lex is called by the parser generated by "go tool yacc" to obtain each
-// token. The method is opened before the matching rules block and closed at
-// the end of the file.
-func (l *openMetricsLexer) Lex() token {
-	if l.i >= len(l.b) {
-		return tEOF
-	}
-	c := l.b[l.i]
-	l.start = l.i
-
-yystate0:
-
-	switch yyt := l.state; yyt {
-	default:
-		panic(fmt.Errorf(`invalid start condition %d`, yyt))
-	case 0: // start condition: INITIAL
-		goto yystart1
-	case 1: // start condition: sComment
-		goto yystart5
-	case 2: // start condition: sMeta1
-		goto yystart25
-	case 3: // start condition: sMeta2
-		goto yystart27
-	case 4: // start condition: sLabels
-		goto yystart30
-	case 5: // start condition: sLValue
-		goto yystart35
-	case 6: // start condition: sValue
-		goto yystart39
-	case 7: // start condition: sTimestamp
-		goto yystart43
-	case 8: // start condition: sExemplar
-		goto yystart50
-	case 9: // start condition: sEValue
-		goto yystart55
-	case 10: // start condition: sETimestamp
-		goto yystart61
-	}
-
-	goto yystate0 // silence unused label error
-	goto yystate1 // silence unused label error
-yystate1:
-	c = l.next()
-yystart1:
-	switch {
-	default:
-		goto yyabort
-	case c == '#':
-		goto yystate2
-	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate4
-	}
-
-yystate2:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate3
-	}
-
-yystate3:
-	c = l.next()
-	goto yyrule1
-
-yystate4:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule8
-	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate4
-	}
-
-	goto yystate5 // silence unused label error
-yystate5:
-	c = l.next()
-yystart5:
-	switch {
-	default:
-		goto yyabort
-	case c == 'E':
-		goto yystate6
-	case c == 'H':
-		goto yystate10
-	case c == 'T':
-		goto yystate15
-	case c == 'U':
-		goto yystate20
-	}
-
-yystate6:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'O':
-		goto yystate7
-	}
-
-yystate7:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'F':
-		goto yystate8
-	}
-
-yystate8:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule5
-	case c == '\n':
-		goto yystate9
-	}
-
-yystate9:
-	c = l.next()
-	goto yyrule5
-
-yystate10:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'E':
-		goto yystate11
-	}
-
-yystate11:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'L':
-		goto yystate12
-	}
-
-yystate12:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'P':
-		goto yystate13
-	}
-
-yystate13:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate14
-	}
-
-yystate14:
-	c = l.next()
-	goto yyrule2
-
-yystate15:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'Y':
-		goto yystate16
-	}
-
-yystate16:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'P':
-		goto yystate17
-	}
-
-yystate17:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'E':
-		goto yystate18
-	}
-
-yystate18:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate19
-	}
-
-yystate19:
-	c = l.next()
-	goto yyrule3
-
-yystate20:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'N':
-		goto yystate21
-	}
-
-yystate21:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'I':
-		goto yystate22
-	}
-
-yystate22:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'T':
-		goto yystate23
-	}
-
-yystate23:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate24
-	}
-
-yystate24:
-	c = l.next()
-	goto yyrule4
-
-	goto yystate25 // silence unused label error
-yystate25:
-	c = l.next()
-yystart25:
-	switch {
-	default:
-		goto yyabort
-	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate26
-	}
-
-yystate26:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule6
-	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate26
-	}
-
-	goto yystate27 // silence unused label error
-yystate27:
-	c = l.next()
-yystart27:
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate28
-	}
-
-yystate28:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '\n':
-		goto yystate29
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
-		goto yystate28
-	}
-
-yystate29:
-	c = l.next()
-	goto yyrule7
-
-	goto yystate30 // silence unused label error
-yystate30:
-	c = l.next()
-yystart30:
-	switch {
-	default:
-		goto yyabort
-	case c == ',':
-		goto yystate31
-	case c == '=':
-		goto yystate32
-	case c == '}':
-		goto yystate34
-	case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate33
-	}
-
-yystate31:
-	c = l.next()
-	goto yyrule13
-
-yystate32:
-	c = l.next()
-	goto yyrule12
-
-yystate33:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule10
-	case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate33
-	}
-
-yystate34:
-	c = l.next()
-	goto yyrule11
-
-	goto yystate35 // silence unused label error
-yystate35:
-	c = l.next()
-yystart35:
-	switch {
-	default:
-		goto yyabort
-	case c == '"':
-		goto yystate36
-	}
-
-yystate36:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '"':
-		goto yystate37
-	case c == '\\':
-		goto yystate38
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
-		goto yystate36
-	}
-
-yystate37:
-	c = l.next()
-	goto yyrule14
-
-yystate38:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
-		goto yystate36
-	}
-
-	goto yystate39 // silence unused label error
-yystate39:
-	c = l.next()
-yystart39:
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate40
-	case c == '{':
-		goto yystate42
-	}
-
-yystate40:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate41
-	}
-
-yystate41:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule15
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate41
-	}
-
-yystate42:
-	c = l.next()
-	goto yyrule9
-
-	goto yystate43 // silence unused label error
-yystate43:
-	c = l.next()
-yystart43:
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate45
-	case c == '\n':
-		goto yystate44
-	}
-
-yystate44:
-	c = l.next()
-	goto yyrule17
-
-yystate45:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '#':
-		goto yystate47
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c == '!' || c == '"' || c >= '$' && c <= 'ÿ':
-		goto yystate46
-	}
-
-yystate46:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule16
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate46
-	}
-
-yystate47:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule16
-	case c == ' ':
-		goto yystate48
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate46
-	}
-
-yystate48:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '{':
-		goto yystate49
-	}
-
-yystate49:
-	c = l.next()
-	goto yyrule18
-
-	goto yystate50 // silence unused label error
-yystate50:
-	c = l.next()
-yystart50:
-	switch {
-	default:
-		goto yyabort
-	case c == ',':
-		goto yystate51
-	case c == '=':
-		goto yystate52
-	case c == '}':
-		goto yystate54
-	case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate53
-	}
-
-yystate51:
-	c = l.next()
-	goto yyrule23
-
-yystate52:
-	c = l.next()
-	goto yyrule21
-
-yystate53:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule19
-	case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate53
-	}
-
-yystate54:
-	c = l.next()
-	goto yyrule20
-
-	goto yystate55 // silence unused label error
-yystate55:
-	c = l.next()
-yystart55:
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate56
-	case c == '"':
-		goto yystate58
-	}
-
-yystate56:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate57
-	}
-
-yystate57:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule24
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate57
-	}
-
-yystate58:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '"':
-		goto yystate59
-	case c == '\\':
-		goto yystate60
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
-		goto yystate58
-	}
-
-yystate59:
-	c = l.next()
-	goto yyrule22
-
-yystate60:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
-		goto yystate58
-	}
-
-	goto yystate61 // silence unused label error
-yystate61:
-	c = l.next()
-yystart61:
-	switch {
-	default:
-		goto yyabort
-	case c == ' ':
-		goto yystate63
-	case c == '\n':
-		goto yystate62
-	}
-
-yystate62:
-	c = l.next()
-	goto yyrule26
-
-yystate63:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate64
-	}
-
-yystate64:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule25
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate64
-	}
-
-yyrule1: // #{S}
-	{
-		l.state = sComment
-		goto yystate0
-	}
-yyrule2: // HELP{S}
-	{
-		l.state = sMeta1
-		return tHelp
-		goto yystate0
-	}
-yyrule3: // TYPE{S}
-	{
-		l.state = sMeta1
-		return tType
-		goto yystate0
-	}
-yyrule4: // UNIT{S}
-	{
-		l.state = sMeta1
-		return tUnit
-		goto yystate0
-	}
-yyrule5: // "EOF"\n?
-	{
-		l.state = sInit
-		return tEOFWord
-		goto yystate0
-	}
-yyrule6: // {M}({M}|{D})*
-	{
-		l.state = sMeta2
-		return tMName
-		goto yystate0
-	}
-yyrule7: // {S}{C}*\n
-	{
-		l.state = sInit
-		return tText
-		goto yystate0
-	}
-yyrule8: // {M}({M}|{D})*
-	{
-		l.state = sValue
-		return tMName
-		goto yystate0
-	}
-yyrule9: // \{
-	{
-		l.state = sLabels
-		return tBraceOpen
-		goto yystate0
-	}
-yyrule10: // {L}({L}|{D})*
-	{
-		return tLName
-	}
-yyrule11: // \}
-	{
-		l.state = sValue
-		return tBraceClose
-		goto yystate0
-	}
-yyrule12: // =
-	{
-		l.state = sLValue
-		return tEqual
-		goto yystate0
-	}
-yyrule13: // ,
-	{
-		return tComma
-	}
-yyrule14: // \"(\\.|[^\\"\n])*\"
-	{
-		l.state = sLabels
-		return tLValue
-		goto yystate0
-	}
-yyrule15: // {S}[^ \n]+
-	{
-		l.state = sTimestamp
-		return tValue
-		goto yystate0
-	}
-yyrule16: // {S}[^ \n]+
-	{
-		return tTimestamp
-	}
-yyrule17: // \n
-	{
-		l.state = sInit
-		return tLinebreak
-		goto yystate0
-	}
-yyrule18: // {S}#{S}\{
-	{
-		l.state = sExemplar
-		return tComment
-		goto yystate0
-	}
-yyrule19: // {L}({L}|{D})*
-	{
-		return tLName
-	}
-yyrule20: // \}
-	{
-		l.state = sEValue
-		return tBraceClose
-		goto yystate0
-	}
-yyrule21: // =
-	{
-		l.state = sEValue
-		return tEqual
-		goto yystate0
-	}
-yyrule22: // \"(\\.|[^\\"\n])*\"
-	{
-		l.state = sExemplar
-		return tLValue
-		goto yystate0
-	}
-yyrule23: // ,
-	{
-		return tComma
-	}
-yyrule24: // {S}[^ \n]+
-	{
-		l.state = sETimestamp
-		return tValue
-		goto yystate0
-	}
-yyrule25: // {S}[^ \n]+
-	{
-		return tTimestamp
-	}
-yyrule26: // \n
-	{
-		l.state = sInit
-		return tLinebreak
-		goto yystate0
-	}
-	panic("unreachable")
-
-	goto yyabort // silence unused label error
-
-yyabort: // no lexem recognized
-
-	return tInvalid
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/openmetricsparse.go 2.33.5+ds1-2/pkg/textparse/openmetricsparse.go
--- 2.31.2+ds1-1/pkg/textparse/openmetricsparse.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/openmetricsparse.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,481 +0,0 @@
-// Copyright 2018 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:generate go get -u modernc.org/golex
-//go:generate golex -o=openmetricslex.l.go openmetricslex.l
-
-package textparse
-
-import (
-	"bytes"
-	"fmt"
-	"io"
-	"math"
-	"sort"
-	"strings"
-	"unicode/utf8"
-
-	"github.com/pkg/errors"
-
-	"github.com/prometheus/prometheus/pkg/exemplar"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/value"
-)
-
-var allowedSuffixes = [][]byte{[]byte("_total"), []byte("_bucket")}
-
-type openMetricsLexer struct {
-	b     []byte
-	i     int
-	start int
-	err   error
-	state int
-}
-
-// buf returns the buffer of the current token.
-func (l *openMetricsLexer) buf() []byte {
-	return l.b[l.start:l.i]
-}
-
-func (l *openMetricsLexer) cur() byte {
-	if l.i < len(l.b) {
-		return l.b[l.i]
-	}
-	return byte(' ')
-}
-
-// next advances the openMetricsLexer to the next character.
-func (l *openMetricsLexer) next() byte {
-	l.i++
-	if l.i >= len(l.b) {
-		l.err = io.EOF
-		return byte(tEOF)
-	}
-	// Lex struggles with null bytes. If we are in a label value or help string, where
-	// they are allowed, consume them here immediately.
-	for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
-		l.i++
-		if l.i >= len(l.b) {
-			l.err = io.EOF
-			return byte(tEOF)
-		}
-	}
-	return l.b[l.i]
-}
-
-func (l *openMetricsLexer) Error(es string) {
-	l.err = errors.New(es)
-}
-
-// OpenMetricsParser parses samples from a byte slice of samples in the official
-// OpenMetrics text exposition format.
-// This is based on the working draft https://docs.google.com/document/u/1/d/1KwV0mAXwwbvvifBvDKH_LU1YjyXE_wxCkHNoCGq1GX0/edit
-type OpenMetricsParser struct {
-	l       *openMetricsLexer
-	series  []byte
-	text    []byte
-	mtype   MetricType
-	val     float64
-	ts      int64
-	hasTS   bool
-	start   int
-	offsets []int
-
-	eOffsets      []int
-	exemplar      []byte
-	exemplarVal   float64
-	exemplarTs    int64
-	hasExemplarTs bool
-}
-
-// NewOpenMetricsParser returns a new parser of the byte slice.
-func NewOpenMetricsParser(b []byte) Parser {
-	return &OpenMetricsParser{l: &openMetricsLexer{b: b}}
-}
-
-// Series returns the bytes of the series, the timestamp if set, and the value
-// of the current sample.
-func (p *OpenMetricsParser) Series() ([]byte, *int64, float64) {
-	if p.hasTS {
-		ts := p.ts
-		return p.series, &ts, p.val
-	}
-	return p.series, nil, p.val
-}
-
-// Help returns the metric name and help text in the current entry.
-// Must only be called after Next returned a help entry.
-// The returned byte slices become invalid after the next call to Next.
-func (p *OpenMetricsParser) Help() ([]byte, []byte) {
-	m := p.l.b[p.offsets[0]:p.offsets[1]]
-
-	// Replacer causes allocations. Replace only when necessary.
-	if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
-		// OpenMetrics always uses the Prometheus format label value escaping.
-		return m, []byte(lvalReplacer.Replace(string(p.text)))
-	}
-	return m, p.text
-}
-
-// Type returns the metric name and type in the current entry.
-// Must only be called after Next returned a type entry.
-// The returned byte slices become invalid after the next call to Next.
-func (p *OpenMetricsParser) Type() ([]byte, MetricType) {
-	return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
-}
-
-// Unit returns the metric name and unit in the current entry.
-// Must only be called after Next returned a unit entry.
-// The returned byte slices become invalid after the next call to Next.
-func (p *OpenMetricsParser) Unit() ([]byte, []byte) {
-	// The Prometheus format does not have units.
-	return p.l.b[p.offsets[0]:p.offsets[1]], p.text
-}
-
-// Comment returns the text of the current comment.
-// Must only be called after Next returned a comment entry.
-// The returned byte slice becomes invalid after the next call to Next.
-func (p *OpenMetricsParser) Comment() []byte {
-	return p.text
-}
-
-// Metric writes the labels of the current sample into the passed labels.
-// It returns the string from which the metric was parsed.
-func (p *OpenMetricsParser) Metric(l *labels.Labels) string {
-	// Allocate the full immutable string immediately, so we just
-	// have to create references on it below.
-	s := string(p.series)
-
-	*l = append(*l, labels.Label{
-		Name:  labels.MetricName,
-		Value: s[:p.offsets[0]-p.start],
-	})
-
-	for i := 1; i < len(p.offsets); i += 4 {
-		a := p.offsets[i] - p.start
-		b := p.offsets[i+1] - p.start
-		c := p.offsets[i+2] - p.start
-		d := p.offsets[i+3] - p.start
-
-		// Replacer causes allocations. Replace only when necessary.
-		if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
-			*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
-			continue
-		}
-		*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
-	}
-
-	// Sort labels. We can skip the first entry since the metric name is
-	// already at the right place.
-	sort.Sort((*l)[1:])
-
-	return s
-}
-
-// Exemplar writes the exemplar of the current sample into the passed
-// exemplar. It returns the whether an exemplar exists.
-func (p *OpenMetricsParser) Exemplar(e *exemplar.Exemplar) bool {
-	if len(p.exemplar) == 0 {
-		return false
-	}
-
-	// Allocate the full immutable string immediately, so we just
-	// have to create references on it below.
-	s := string(p.exemplar)
-
-	e.Value = p.exemplarVal
-	if p.hasExemplarTs {
-		e.HasTs = true
-		e.Ts = p.exemplarTs
-	}
-
-	for i := 0; i < len(p.eOffsets); i += 4 {
-		a := p.eOffsets[i] - p.start
-		b := p.eOffsets[i+1] - p.start
-		c := p.eOffsets[i+2] - p.start
-		d := p.eOffsets[i+3] - p.start
-
-		e.Labels = append(e.Labels, labels.Label{Name: s[a:b], Value: s[c:d]})
-	}
-
-	// Sort the labels.
-	sort.Sort(e.Labels)
-
-	return true
-}
-
-// nextToken returns the next token from the openMetricsLexer.
-func (p *OpenMetricsParser) nextToken() token {
-	tok := p.l.Lex()
-	return tok
-}
-
-// Next advances the parser to the next sample. It returns false if no
-// more samples were read or an error occurred.
-func (p *OpenMetricsParser) Next() (Entry, error) {
-	var err error
-
-	p.start = p.l.i
-	p.offsets = p.offsets[:0]
-	p.eOffsets = p.eOffsets[:0]
-	p.exemplar = p.exemplar[:0]
-	p.exemplarVal = 0
-	p.hasExemplarTs = false
-
-	switch t := p.nextToken(); t {
-	case tEOFWord:
-		if t := p.nextToken(); t != tEOF {
-			return EntryInvalid, errors.New("unexpected data after # EOF")
-		}
-		return EntryInvalid, io.EOF
-	case tEOF:
-		return EntryInvalid, errors.New("data does not end with # EOF")
-	case tHelp, tType, tUnit:
-		switch t := p.nextToken(); t {
-		case tMName:
-			p.offsets = append(p.offsets, p.l.start, p.l.i)
-		default:
-			return EntryInvalid, parseError("expected metric name after HELP", t)
-		}
-		switch t := p.nextToken(); t {
-		case tText:
-			if len(p.l.buf()) > 1 {
-				p.text = p.l.buf()[1 : len(p.l.buf())-1]
-			} else {
-				p.text = []byte{}
-			}
-		default:
-			return EntryInvalid, parseError("expected text in HELP", t)
-		}
-		switch t {
-		case tType:
-			switch s := yoloString(p.text); s {
-			case "counter":
-				p.mtype = MetricTypeCounter
-			case "gauge":
-				p.mtype = MetricTypeGauge
-			case "histogram":
-				p.mtype = MetricTypeHistogram
-			case "gaugehistogram":
-				p.mtype = MetricTypeGaugeHistogram
-			case "summary":
-				p.mtype = MetricTypeSummary
-			case "info":
-				p.mtype = MetricTypeInfo
-			case "stateset":
-				p.mtype = MetricTypeStateset
-			case "unknown":
-				p.mtype = MetricTypeUnknown
-			default:
-				return EntryInvalid, errors.Errorf("invalid metric type %q", s)
-			}
-		case tHelp:
-			if !utf8.Valid(p.text) {
-				return EntryInvalid, errors.New("help text is not a valid utf8 string")
-			}
-		}
-		switch t {
-		case tHelp:
-			return EntryHelp, nil
-		case tType:
-			return EntryType, nil
-		case tUnit:
-			m := yoloString(p.l.b[p.offsets[0]:p.offsets[1]])
-			u := yoloString(p.text)
-			if len(u) > 0 {
-				if !strings.HasSuffix(m, u) || len(m) < len(u)+1 || p.l.b[p.offsets[1]-len(u)-1] != '_' {
-					return EntryInvalid, errors.Errorf("unit not a suffix of metric %q", m)
-				}
-			}
-			return EntryUnit, nil
-		}
-
-	case tMName:
-		p.offsets = append(p.offsets, p.l.i)
-		p.series = p.l.b[p.start:p.l.i]
-
-		t2 := p.nextToken()
-		if t2 == tBraceOpen {
-			p.offsets, err = p.parseLVals(p.offsets)
-			if err != nil {
-				return EntryInvalid, err
-			}
-			p.series = p.l.b[p.start:p.l.i]
-			t2 = p.nextToken()
-		}
-		p.val, err = p.getFloatValue(t2, "metric")
-		if err != nil {
-			return EntryInvalid, err
-		}
-
-		p.hasTS = false
-		switch t2 := p.nextToken(); t2 {
-		case tEOF:
-			return EntryInvalid, errors.New("data does not end with # EOF")
-		case tLinebreak:
-			break
-		case tComment:
-			if err := p.parseComment(); err != nil {
-				return EntryInvalid, err
-			}
-		case tTimestamp:
-			p.hasTS = true
-			var ts float64
-			// A float is enough to hold what we need for millisecond resolution.
-			if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
-				return EntryInvalid, err
-			}
-			if math.IsNaN(ts) || math.IsInf(ts, 0) {
-				return EntryInvalid, errors.New("invalid timestamp")
-			}
-			p.ts = int64(ts * 1000)
-			switch t3 := p.nextToken(); t3 {
-			case tLinebreak:
-			case tComment:
-				if err := p.parseComment(); err != nil {
-					return EntryInvalid, err
-				}
-			default:
-				return EntryInvalid, parseError("expected next entry after timestamp", t3)
-			}
-		default:
-			return EntryInvalid, parseError("expected timestamp or # symbol", t2)
-		}
-		return EntrySeries, nil
-
-	default:
-		err = errors.Errorf("%q %q is not a valid start token", t, string(p.l.cur()))
-	}
-	return EntryInvalid, err
-}
-
-func (p *OpenMetricsParser) parseComment() error {
-	// Validate the name of the metric. It must have _total or _bucket as
-	// suffix for exemplars to be supported.
-	if err := p.validateNameForExemplar(p.series[:p.offsets[0]-p.start]); err != nil {
-		return err
-	}
-
-	var err error
-	// Parse the labels.
-	p.eOffsets, err = p.parseLVals(p.eOffsets)
-	if err != nil {
-		return err
-	}
-	p.exemplar = p.l.b[p.start:p.l.i]
-
-	// Get the value.
-	p.exemplarVal, err = p.getFloatValue(p.nextToken(), "exemplar labels")
-	if err != nil {
-		return err
-	}
-
-	// Read the optional timestamp.
-	p.hasExemplarTs = false
-	switch t2 := p.nextToken(); t2 {
-	case tEOF:
-		return errors.New("data does not end with # EOF")
-	case tLinebreak:
-		break
-	case tTimestamp:
-		p.hasExemplarTs = true
-		var ts float64
-		// A float is enough to hold what we need for millisecond resolution.
-		if ts, err = parseFloat(yoloString(p.l.buf()[1:])); err != nil {
-			return err
-		}
-		if math.IsNaN(ts) || math.IsInf(ts, 0) {
-			return errors.New("invalid exemplar timestamp")
-		}
-		p.exemplarTs = int64(ts * 1000)
-		switch t3 := p.nextToken(); t3 {
-		case tLinebreak:
-		default:
-			return parseError("expected next entry after exemplar timestamp", t3)
-		}
-	default:
-		return parseError("expected timestamp or comment", t2)
-	}
-	return nil
-}
-
-func (p *OpenMetricsParser) parseLVals(offsets []int) ([]int, error) {
-	first := true
-	for {
-		t := p.nextToken()
-		switch t {
-		case tBraceClose:
-			return offsets, nil
-		case tComma:
-			if first {
-				return nil, parseError("expected label name or left brace", t)
-			}
-			t = p.nextToken()
-			if t != tLName {
-				return nil, parseError("expected label name", t)
-			}
-		case tLName:
-			if !first {
-				return nil, parseError("expected comma", t)
-			}
-		default:
-			if first {
-				return nil, parseError("expected label name or left brace", t)
-			}
-			return nil, parseError("expected comma or left brace", t)
-
-		}
-		first = false
-		// t is now a label name.
-
-		offsets = append(offsets, p.l.start, p.l.i)
-
-		if t := p.nextToken(); t != tEqual {
-			return nil, parseError("expected equal", t)
-		}
-		if t := p.nextToken(); t != tLValue {
-			return nil, parseError("expected label value", t)
-		}
-		if !utf8.Valid(p.l.buf()) {
-			return nil, errors.New("invalid UTF-8 label value")
-		}
-
-		// The openMetricsLexer ensures the value string is quoted. Strip first
-		// and last character.
-		offsets = append(offsets, p.l.start+1, p.l.i-1)
-	}
-}
-
-func (p *OpenMetricsParser) getFloatValue(t token, after string) (float64, error) {
-	if t != tValue {
-		return 0, parseError(fmt.Sprintf("expected value after %v", after), t)
-	}
-	val, err := parseFloat(yoloString(p.l.buf()[1:]))
-	if err != nil {
-		return 0, err
-	}
-	// Ensure canonical NaN value.
-	if math.IsNaN(p.exemplarVal) {
-		val = math.Float64frombits(value.NormalNaN)
-	}
-	return val, nil
-}
-
-func (p *OpenMetricsParser) validateNameForExemplar(name []byte) error {
-	for _, suffix := range allowedSuffixes {
-		if bytes.HasSuffix(name, suffix) {
-			return nil
-		}
-	}
-	return fmt.Errorf("metric name %v does not support exemplars", string(name))
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/openmetricsparse_test.go 2.33.5+ds1-2/pkg/textparse/openmetricsparse_test.go
--- 2.31.2+ds1-1/pkg/textparse/openmetricsparse_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/openmetricsparse_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,612 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package textparse
-
-import (
-	"io"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-
-	"github.com/prometheus/prometheus/pkg/exemplar"
-	"github.com/prometheus/prometheus/pkg/labels"
-)
-
-func TestOpenMetricsParse(t *testing.T) {
-	input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
-# TYPE go_gc_duration_seconds summary
-# UNIT go_gc_duration_seconds seconds
-go_gc_duration_seconds{quantile="0"} 4.9351e-05
-go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
-go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
-# HELP nohelp1 
-# HELP help2 escape \ \n \\ \" \x chars
-# UNIT nounit 
-go_gc_duration_seconds{quantile="1.0",a="b"} 8.3835e-05
-go_gc_duration_seconds_count 99
-some:aggregate:rate5m{a_b="c"} 1
-# HELP go_goroutines Number of goroutines that currently exist.
-# TYPE go_goroutines gauge
-go_goroutines 33 123.123
-# TYPE hh histogram
-hh_bucket{le="+Inf"} 1
-# TYPE gh gaugehistogram
-gh_bucket{le="+Inf"} 1
-# TYPE hhh histogram
-hhh_bucket{le="+Inf"} 1 # {aa="bb"} 4
-# TYPE ggh gaugehistogram
-ggh_bucket{le="+Inf"} 1 # {cc="dd",xx="yy"} 4 123.123
-# TYPE ii info
-ii{foo="bar"} 1
-# TYPE ss stateset
-ss{ss="foo"} 1
-ss{ss="bar"} 0
-# TYPE un unknown
-_metric_starting_with_underscore 1
-testmetric{_label_starting_with_underscore="foo"} 1
-testmetric{label="\"bar\""} 1
-# TYPE foo counter
-foo_total 17.0 1520879607.789 # {xx="yy"} 5`
-
-	input += "\n# HELP metric foo\x00bar"
-	input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
-	input += "\n# EOF\n"
-
-	int64p := func(x int64) *int64 { return &x }
-
-	exp := []struct {
-		lset    labels.Labels
-		m       string
-		t       *int64
-		v       float64
-		typ     MetricType
-		help    string
-		unit    string
-		comment string
-		e       *exemplar.Exemplar
-	}{
-		{
-			m:    "go_gc_duration_seconds",
-			help: "A summary of the GC invocation durations.",
-		}, {
-			m:   "go_gc_duration_seconds",
-			typ: MetricTypeSummary,
-		}, {
-			m:    "go_gc_duration_seconds",
-			unit: "seconds",
-		}, {
-			m:    `go_gc_duration_seconds{quantile="0"}`,
-			v:    4.9351e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
-		}, {
-			m:    `go_gc_duration_seconds{quantile="0.25"}`,
-			v:    7.424100000000001e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
-		}, {
-			m:    `go_gc_duration_seconds{quantile="0.5",a="b"}`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
-		}, {
-			m:    "nohelp1",
-			help: "",
-		}, {
-			m:    "help2",
-			help: "escape \\ \n \\ \" \\x chars",
-		}, {
-			m:    "nounit",
-			unit: "",
-		}, {
-			m:    `go_gc_duration_seconds{quantile="1.0",a="b"}`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
-		}, {
-			m:    `go_gc_duration_seconds_count`,
-			v:    99,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
-		}, {
-			m:    `some:aggregate:rate5m{a_b="c"}`,
-			v:    1,
-			lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
-		}, {
-			m:    "go_goroutines",
-			help: "Number of goroutines that currently exist.",
-		}, {
-			m:   "go_goroutines",
-			typ: MetricTypeGauge,
-		}, {
-			m:    `go_goroutines`,
-			v:    33,
-			t:    int64p(123123),
-			lset: labels.FromStrings("__name__", "go_goroutines"),
-		}, {
-			m:   "hh",
-			typ: MetricTypeHistogram,
-		}, {
-			m:    `hh_bucket{le="+Inf"}`,
-			v:    1,
-			lset: labels.FromStrings("__name__", "hh_bucket", "le", "+Inf"),
-		}, {
-			m:   "gh",
-			typ: MetricTypeGaugeHistogram,
-		}, {
-			m:    `gh_bucket{le="+Inf"}`,
-			v:    1,
-			lset: labels.FromStrings("__name__", "gh_bucket", "le", "+Inf"),
-		}, {
-			m:   "hhh",
-			typ: MetricTypeHistogram,
-		}, {
-			m:    `hhh_bucket{le="+Inf"}`,
-			v:    1,
-			lset: labels.FromStrings("__name__", "hhh_bucket", "le", "+Inf"),
-			e:    &exemplar.Exemplar{Labels: labels.FromStrings("aa", "bb"), Value: 4},
-		}, {
-			m:   "ggh",
-			typ: MetricTypeGaugeHistogram,
-		}, {
-			m:    `ggh_bucket{le="+Inf"}`,
-			v:    1,
-			lset: labels.FromStrings("__name__", "ggh_bucket", "le", "+Inf"),
-			e:    &exemplar.Exemplar{Labels: labels.FromStrings("cc", "dd", "xx", "yy"), Value: 4, HasTs: true, Ts: 123123},
-		}, {
-			m:   "ii",
-			typ: MetricTypeInfo,
-		}, {
-			m:    `ii{foo="bar"}`,
-			v:    1,
-			lset: labels.FromStrings("__name__", "ii", "foo", "bar"),
-		}, {
-			m:   "ss",
-			typ: MetricTypeStateset,
-		}, {
-			m:    `ss{ss="foo"}`,
-			v:    1,
-			lset: labels.FromStrings("__name__", "ss", "ss", "foo"),
-		}, {
-			m:    `ss{ss="bar"}`,
-			v:    0,
-			lset: labels.FromStrings("__name__", "ss", "ss", "bar"),
-		}, {
-			m:   "un",
-			typ: MetricTypeUnknown,
-		}, {
-			m:    "_metric_starting_with_underscore",
-			v:    1,
-			lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
-		}, {
-			m:    "testmetric{_label_starting_with_underscore=\"foo\"}",
-			v:    1,
-			lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
-		}, {
-			m:    "testmetric{label=\"\\\"bar\\\"\"}",
-			v:    1,
-			lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
-		}, {
-			m:   "foo",
-			typ: MetricTypeCounter,
-		}, {
-			m:    "foo_total",
-			v:    17,
-			lset: labels.FromStrings("__name__", "foo_total"),
-			t:    int64p(1520879607789),
-			e:    &exemplar.Exemplar{Labels: labels.FromStrings("xx", "yy"), Value: 5},
-		}, {
-			m:    "metric",
-			help: "foo\x00bar",
-		}, {
-			m:    "null_byte_metric{a=\"abc\x00\"}",
-			v:    1,
-			lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
-		},
-	}
-
-	p := NewOpenMetricsParser([]byte(input))
-	i := 0
-
-	var res labels.Labels
-
-	for {
-		et, err := p.Next()
-		if err == io.EOF {
-			break
-		}
-		require.NoError(t, err)
-
-		switch et {
-		case EntrySeries:
-			m, ts, v := p.Series()
-
-			var e exemplar.Exemplar
-			p.Metric(&res)
-			found := p.Exemplar(&e)
-			require.Equal(t, exp[i].m, string(m))
-			if e.HasTs {
-				require.Equal(t, exp[i].t, ts)
-			}
-			require.Equal(t, exp[i].v, v)
-			require.Equal(t, exp[i].lset, res)
-			if exp[i].e == nil {
-				require.Equal(t, false, found)
-			} else {
-				require.Equal(t, true, found)
-				require.Equal(t, *exp[i].e, e)
-			}
-			res = res[:0]
-
-		case EntryType:
-			m, typ := p.Type()
-			require.Equal(t, exp[i].m, string(m))
-			require.Equal(t, exp[i].typ, typ)
-
-		case EntryHelp:
-			m, h := p.Help()
-			require.Equal(t, exp[i].m, string(m))
-			require.Equal(t, exp[i].help, string(h))
-
-		case EntryUnit:
-			m, u := p.Unit()
-			require.Equal(t, exp[i].m, string(m))
-			require.Equal(t, exp[i].unit, string(u))
-
-		case EntryComment:
-			require.Equal(t, exp[i].comment, string(p.Comment()))
-		}
-
-		i++
-	}
-	require.Equal(t, len(exp), i)
-}
-
-func TestOpenMetricsParseErrors(t *testing.T) {
-	cases := []struct {
-		input string
-		err   string
-	}{
-		// Happy cases. EOF is returned by the parser at the end of valid
-		// data.
-		{
-			input: "# EOF",
-			err:   "EOF",
-		},
-		{
-			input: "# EOF\n",
-			err:   "EOF",
-		},
-		// Unhappy cases.
-		{
-			input: "",
-			err:   "data does not end with # EOF",
-		},
-		{
-			input: "\n",
-			err:   "\"INVALID\" \"\\n\" is not a valid start token",
-		},
-		{
-			input: "metric",
-			err:   "expected value after metric, got \"EOF\"",
-		},
-		{
-			input: "metric 1",
-			err:   "data does not end with # EOF",
-		},
-		{
-			input: "metric 1\n",
-			err:   "data does not end with # EOF",
-		},
-		{
-			input: "metric_total 1 # {aa=\"bb\"} 4",
-			err:   "data does not end with # EOF",
-		},
-		{
-			input: "a\n#EOF\n",
-			err:   "expected value after metric, got \"INVALID\"",
-		},
-		{
-			input: "\n\n#EOF\n",
-			err:   "\"INVALID\" \"\\n\" is not a valid start token",
-		},
-		{
-			input: " a 1\n#EOF\n",
-			err:   "\"INVALID\" \" \" is not a valid start token",
-		},
-		{
-			input: "9\n#EOF\n",
-			err:   "\"INVALID\" \"9\" is not a valid start token",
-		},
-		{
-			input: "# TYPE u untyped\n#EOF\n",
-			err:   "invalid metric type \"untyped\"",
-		},
-		{
-			input: "# TYPE c counter \n#EOF\n",
-			err:   "invalid metric type \"counter \"",
-		},
-		{
-			input: "#  TYPE c counter\n#EOF\n",
-			err:   "\"INVALID\" \" \" is not a valid start token",
-		},
-		{
-			input: "# UNIT metric suffix\n#EOF\n",
-			err:   "unit not a suffix of metric \"metric\"",
-		},
-		{
-			input: "# UNIT metricsuffix suffix\n#EOF\n",
-			err:   "unit not a suffix of metric \"metricsuffix\"",
-		},
-		{
-			input: "# UNIT m suffix\n#EOF\n",
-			err:   "unit not a suffix of metric \"m\"",
-		},
-		{
-			input: "# HELP m\n#EOF\n",
-			err:   "expected text in HELP, got \"INVALID\"",
-		},
-		{
-			input: "a\t1\n#EOF\n",
-			err:   "expected value after metric, got \"INVALID\"",
-		},
-		{
-			input: "a 1\t2\n#EOF\n",
-			err:   "strconv.ParseFloat: parsing \"1\\t2\": invalid syntax",
-		},
-		{
-			input: "a 1 2 \n#EOF\n",
-			err:   "expected next entry after timestamp, got \"INVALID\"",
-		},
-		{
-			input: "a 1 2 #\n#EOF\n",
-			err:   "expected next entry after timestamp, got \"TIMESTAMP\"",
-		},
-		{
-			input: "a 1 1z\n#EOF\n",
-			err:   "strconv.ParseFloat: parsing \"1z\": invalid syntax",
-		},
-		{
-			input: " # EOF\n",
-			err:   "\"INVALID\" \" \" is not a valid start token",
-		},
-		{
-			input: "# EOF\na 1",
-			err:   "unexpected data after # EOF",
-		},
-		{
-			input: "# EOF\n\n",
-			err:   "unexpected data after # EOF",
-		},
-		{
-			input: "# EOFa 1",
-			err:   "unexpected data after # EOF",
-		},
-		{
-			input: "#\tTYPE c counter\n",
-			err:   "\"INVALID\" \"\\t\" is not a valid start token",
-		},
-		{
-			input: "# TYPE c  counter\n",
-			err:   "invalid metric type \" counter\"",
-		},
-		{
-			input: "a 1 1 1\n# EOF\n",
-			err:   "expected next entry after timestamp, got \"TIMESTAMP\"",
-		},
-		{
-			input: "a{b='c'} 1\n# EOF\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "a{b=\"c\",} 1\n# EOF\n",
-			err:   "expected label name, got \"BCLOSE\"",
-		},
-		{
-			input: "a{,b=\"c\"} 1\n# EOF\n",
-			err:   "expected label name or left brace, got \"COMMA\"",
-		},
-		{
-			input: "a{b=\"c\"d=\"e\"} 1\n# EOF\n",
-			err:   "expected comma, got \"LNAME\"",
-		},
-		{
-			input: "a{b=\"c\",,d=\"e\"} 1\n# EOF\n",
-			err:   "expected label name, got \"COMMA\"",
-		},
-		{
-			input: "a{b=\n# EOF\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "a{\xff=\"foo\"} 1\n# EOF\n",
-			err:   "expected label name or left brace, got \"INVALID\"",
-		},
-		{
-			input: "a{b=\"\xff\"} 1\n# EOF\n",
-			err:   "invalid UTF-8 label value",
-		},
-		{
-			input: "a true\n",
-			err:   "strconv.ParseFloat: parsing \"true\": invalid syntax",
-		},
-		{
-			input: "something_weird{problem=\"\n# EOF\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "empty_label_name{=\"\"} 0\n# EOF\n",
-			err:   "expected label name or left brace, got \"EQUAL\"",
-		},
-		{
-			input: "foo 1_2\n\n# EOF\n",
-			err:   "unsupported character in float",
-		},
-		{
-			input: "foo 0x1p-3\n\n# EOF\n",
-			err:   "unsupported character in float",
-		},
-		{
-			input: "foo 0x1P-3\n\n# EOF\n",
-			err:   "unsupported character in float",
-		},
-		{
-			input: "foo 0 1_2\n\n# EOF\n",
-			err:   "unsupported character in float",
-		},
-		{
-			input: "custom_metric_total 1 # {aa=bb}\n# EOF\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "custom_metric_total 1 # {aa=\"bb\"}\n# EOF\n",
-			err:   "expected value after exemplar labels, got \"INVALID\"",
-		},
-		{
-			input: `custom_metric_total 1 # {aa="bb"}`,
-			err:   "expected value after exemplar labels, got \"EOF\"",
-		},
-		{
-			input: `custom_metric 1 # {aa="bb"}`,
-			err:   "metric name custom_metric does not support exemplars",
-		},
-		{
-			input: `custom_metric_total 1 # {aa="bb",,cc="dd"} 1`,
-			err:   "expected label name, got \"COMMA\"",
-		},
-		{
-			input: `custom_metric_total 1 # {aa="bb"} 1_2`,
-			err:   "unsupported character in float",
-		},
-		{
-			input: `custom_metric_total 1 # {aa="bb"} 0x1p-3`,
-			err:   "unsupported character in float",
-		},
-		{
-			input: `custom_metric_total 1 # {aa="bb"} true`,
-			err:   "strconv.ParseFloat: parsing \"true\": invalid syntax",
-		},
-		{
-			input: `custom_metric_total 1 # {aa="bb",cc=}`,
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: `custom_metric_total 1 # {aa=\"\xff\"} 9.0`,
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: `{b="c",} 1`,
-			err:   `"INVALID" "{" is not a valid start token`,
-		},
-		{
-			input: `a 1 NaN`,
-			err:   `invalid timestamp`,
-		},
-		{
-			input: `a 1 -Inf`,
-			err:   `invalid timestamp`,
-		},
-		{
-			input: `a 1 Inf`,
-			err:   `invalid timestamp`,
-		},
-		{
-			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 NaN",
-			err:   `invalid exemplar timestamp`,
-		},
-		{
-			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 -Inf",
-			err:   `invalid exemplar timestamp`,
-		},
-		{
-			input: "# TYPE hhh histogram\nhhh_bucket{le=\"+Inf\"} 1 # {aa=\"bb\"} 4 Inf",
-			err:   `invalid exemplar timestamp`,
-		},
-	}
-
-	for i, c := range cases {
-		p := NewOpenMetricsParser([]byte(c.input))
-		var err error
-		for err == nil {
-			_, err = p.Next()
-		}
-		require.Equal(t, c.err, err.Error(), "test %d: %s", i, c.input)
-	}
-}
-
-func TestOMNullByteHandling(t *testing.T) {
-	cases := []struct {
-		input string
-		err   string
-	}{
-		{
-			input: "null_byte_metric{a=\"abc\x00\"} 1\n# EOF\n",
-			err:   "",
-		},
-		{
-			input: "a{b=\"\x00ss\"} 1\n# EOF\n",
-			err:   "",
-		},
-		{
-			input: "a{b=\"\x00\"} 1\n# EOF\n",
-			err:   "",
-		},
-		{
-			input: "a{b=\"\x00\"} 1\n# EOF",
-			err:   "",
-		},
-		{
-			input: "a{b=\x00\"ssss\"} 1\n# EOF\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "a{b=\"\x00",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "a{b\x00=\"hiih\"}	1",
-			err: "expected equal, got \"INVALID\"",
-		},
-		{
-			input: "a\x00{b=\"ddd\"} 1",
-			err:   "expected value after metric, got \"INVALID\"",
-		},
-		{
-			input: "#",
-			err:   "\"INVALID\" \" \" is not a valid start token",
-		},
-		{
-			input: "# H",
-			err:   "\"INVALID\" \" \" is not a valid start token",
-		},
-		{
-			input: "custom_metric_total 1 # {b=\x00\"ssss\"} 1\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "custom_metric_total 1 # {b=\"\x00ss\"} 1\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-	}
-
-	for i, c := range cases {
-		p := NewOpenMetricsParser([]byte(c.input))
-		var err error
-		for err == nil {
-			_, err = p.Next()
-		}
-
-		if c.err == "" {
-			require.Equal(t, io.EOF, err, "test %d", i)
-			continue
-		}
-
-		require.Equal(t, c.err, err.Error(), "test %d", i)
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/promlex.l 2.33.5+ds1-2/pkg/textparse/promlex.l
--- 2.31.2+ds1-1/pkg/textparse/promlex.l	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/promlex.l	1970-01-01 00:00:00.000000000 +0000
@@ -1,100 +0,0 @@
-%{
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package textparse
-
-import (
-    "fmt"
-)
-
-const (
-    sInit = iota
-    sComment
-    sMeta1
-    sMeta2
-    sLabels
-    sLValue
-    sValue
-    sTimestamp
-)
-
-// Lex is called by the parser generated by "go tool yacc" to obtain each
-// token. The method is opened before the matching rules block and closed at
-// the end of the file.
-func (l *promlexer) Lex() token {
-    if l.i >= len(l.b) {
-        return tEOF
-    }
-    c := l.b[l.i]
-    l.start = l.i
-
-%}
-
-D     [0-9]
-L     [a-zA-Z_]
-M     [a-zA-Z_:]
-C     [^\n]
-
-%x sComment sMeta1 sMeta2 sLabels sLValue sValue sTimestamp
-
-%yyc c
-%yyn c = l.next()
-%yyt l.state
-
-
-%%
-
-\0                                    return tEOF
-\n                                    l.state = sInit; return tLinebreak
-<*>[ \t]+                             return tWhitespace
-
-#[ \t]+                               l.state = sComment
-#                                     return l.consumeComment()
-<sComment>HELP[\t ]+                  l.state = sMeta1; return tHelp
-<sComment>TYPE[\t ]+                  l.state = sMeta1; return tType
-<sMeta1>{M}({M}|{D})*                 l.state = sMeta2; return tMName
-<sMeta2>{C}*                          l.state = sInit; return tText
-
-{M}({M}|{D})*                         l.state = sValue; return tMName
-<sValue>\{                            l.state = sLabels; return tBraceOpen
-<sLabels>{L}({L}|{D})*                return tLName
-<sLabels>\}                           l.state = sValue; return tBraceClose
-<sLabels>=                            l.state = sLValue; return tEqual
-<sLabels>,                            return tComma
-<sLValue>\"(\\.|[^\\"])*\"            l.state = sLabels; return tLValue
-<sValue>[^{ \t\n]+                    l.state = sTimestamp; return tValue
-<sTimestamp>{D}+                      return tTimestamp
-<sTimestamp>\n                        l.state = sInit; return tLinebreak
-
-%%
-    // Workaround to gobble up comments that started with a HELP or TYPE
-    // prefix. We just consume all characters until we reach a newline.
-    // This saves us from adding disproportionate complexity to the parser.
-    if l.state == sComment {
-        return l.consumeComment()
-    }
-    return tInvalid
-}
-
-func (l *promlexer) consumeComment() token {
-    for c := l.cur(); ; c = l.next() {
-        switch c {
-        case 0:
-            return tEOF
-        case '\n':
-            l.state = sInit
-            return tComment
-        }
-    }
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/promlex.l.go 2.33.5+ds1-2/pkg/textparse/promlex.l.go
--- 2.31.2+ds1-1/pkg/textparse/promlex.l.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/promlex.l.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,553 +0,0 @@
-// CAUTION: Generated file - DO NOT EDIT.
-
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package textparse
-
-import (
-	"github.com/pkg/errors"
-)
-
-const (
-	sInit = iota
-	sComment
-	sMeta1
-	sMeta2
-	sLabels
-	sLValue
-	sValue
-	sTimestamp
-	sExemplar
-	sEValue
-	sETimestamp
-)
-
-// Lex is called by the parser generated by "go tool yacc" to obtain each
-// token. The method is opened before the matching rules block and closed at
-// the end of the file.
-func (l *promlexer) Lex() token {
-	if l.i >= len(l.b) {
-		return tEOF
-	}
-	c := l.b[l.i]
-	l.start = l.i
-
-yystate0:
-
-	switch yyt := l.state; yyt {
-	default:
-		panic(errors.Errorf(`invalid start condition %d`, yyt))
-	case 0: // start condition: INITIAL
-		goto yystart1
-	case 1: // start condition: sComment
-		goto yystart8
-	case 2: // start condition: sMeta1
-		goto yystart19
-	case 3: // start condition: sMeta2
-		goto yystart21
-	case 4: // start condition: sLabels
-		goto yystart24
-	case 5: // start condition: sLValue
-		goto yystart29
-	case 6: // start condition: sValue
-		goto yystart33
-	case 7: // start condition: sTimestamp
-		goto yystart36
-	}
-
-	goto yystate0 // silence unused label error
-	goto yystate1 // silence unused label error
-yystate1:
-	c = l.next()
-yystart1:
-	switch {
-	default:
-		goto yyabort
-	case c == '#':
-		goto yystate5
-	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate7
-	case c == '\n':
-		goto yystate4
-	case c == '\t' || c == ' ':
-		goto yystate3
-	case c == '\x00':
-		goto yystate2
-	}
-
-yystate2:
-	c = l.next()
-	goto yyrule1
-
-yystate3:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule3
-	case c == '\t' || c == ' ':
-		goto yystate3
-	}
-
-yystate4:
-	c = l.next()
-	goto yyrule2
-
-yystate5:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule5
-	case c == '\t' || c == ' ':
-		goto yystate6
-	}
-
-yystate6:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule4
-	case c == '\t' || c == ' ':
-		goto yystate6
-	}
-
-yystate7:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule10
-	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate7
-	}
-
-	goto yystate8 // silence unused label error
-yystate8:
-	c = l.next()
-yystart8:
-	switch {
-	default:
-		goto yyabort
-	case c == 'H':
-		goto yystate9
-	case c == 'T':
-		goto yystate14
-	case c == '\t' || c == ' ':
-		goto yystate3
-	}
-
-yystate9:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'E':
-		goto yystate10
-	}
-
-yystate10:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'L':
-		goto yystate11
-	}
-
-yystate11:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'P':
-		goto yystate12
-	}
-
-yystate12:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '\t' || c == ' ':
-		goto yystate13
-	}
-
-yystate13:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule6
-	case c == '\t' || c == ' ':
-		goto yystate13
-	}
-
-yystate14:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'Y':
-		goto yystate15
-	}
-
-yystate15:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'P':
-		goto yystate16
-	}
-
-yystate16:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == 'E':
-		goto yystate17
-	}
-
-yystate17:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '\t' || c == ' ':
-		goto yystate18
-	}
-
-yystate18:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule7
-	case c == '\t' || c == ' ':
-		goto yystate18
-	}
-
-	goto yystate19 // silence unused label error
-yystate19:
-	c = l.next()
-yystart19:
-	switch {
-	default:
-		goto yyabort
-	case c == ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate20
-	case c == '\t' || c == ' ':
-		goto yystate3
-	}
-
-yystate20:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule8
-	case c >= '0' && c <= ':' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate20
-	}
-
-	goto yystate21 // silence unused label error
-yystate21:
-	c = l.next()
-yystart21:
-	switch {
-	default:
-		goto yyrule9
-	case c == '\t' || c == ' ':
-		goto yystate23
-	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate22
-	}
-
-yystate22:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule9
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
-		goto yystate22
-	}
-
-yystate23:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule3
-	case c == '\t' || c == ' ':
-		goto yystate23
-	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'ÿ':
-		goto yystate22
-	}
-
-	goto yystate24 // silence unused label error
-yystate24:
-	c = l.next()
-yystart24:
-	switch {
-	default:
-		goto yyabort
-	case c == ',':
-		goto yystate25
-	case c == '=':
-		goto yystate26
-	case c == '\t' || c == ' ':
-		goto yystate3
-	case c == '}':
-		goto yystate28
-	case c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate27
-	}
-
-yystate25:
-	c = l.next()
-	goto yyrule15
-
-yystate26:
-	c = l.next()
-	goto yyrule14
-
-yystate27:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule12
-	case c >= '0' && c <= '9' || c >= 'A' && c <= 'Z' || c == '_' || c >= 'a' && c <= 'z':
-		goto yystate27
-	}
-
-yystate28:
-	c = l.next()
-	goto yyrule13
-
-	goto yystate29 // silence unused label error
-yystate29:
-	c = l.next()
-yystart29:
-	switch {
-	default:
-		goto yyabort
-	case c == '"':
-		goto yystate30
-	case c == '\t' || c == ' ':
-		goto yystate3
-	}
-
-yystate30:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c == '"':
-		goto yystate31
-	case c == '\\':
-		goto yystate32
-	case c >= '\x01' && c <= '!' || c >= '#' && c <= '[' || c >= ']' && c <= 'ÿ':
-		goto yystate30
-	}
-
-yystate31:
-	c = l.next()
-	goto yyrule16
-
-yystate32:
-	c = l.next()
-	switch {
-	default:
-		goto yyabort
-	case c >= '\x01' && c <= '\t' || c >= '\v' && c <= 'ÿ':
-		goto yystate30
-	}
-
-	goto yystate33 // silence unused label error
-yystate33:
-	c = l.next()
-yystart33:
-	switch {
-	default:
-		goto yyabort
-	case c == '\t' || c == ' ':
-		goto yystate3
-	case c == '{':
-		goto yystate35
-	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
-		goto yystate34
-	}
-
-yystate34:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule17
-	case c >= '\x01' && c <= '\b' || c >= '\v' && c <= '\x1f' || c >= '!' && c <= 'z' || c >= '|' && c <= 'ÿ':
-		goto yystate34
-	}
-
-yystate35:
-	c = l.next()
-	goto yyrule11
-
-	goto yystate36 // silence unused label error
-yystate36:
-	c = l.next()
-yystart36:
-	switch {
-	default:
-		goto yyabort
-	case c == '\n':
-		goto yystate37
-	case c == '\t' || c == ' ':
-		goto yystate3
-	case c >= '0' && c <= '9':
-		goto yystate38
-	}
-
-yystate37:
-	c = l.next()
-	goto yyrule19
-
-yystate38:
-	c = l.next()
-	switch {
-	default:
-		goto yyrule18
-	case c >= '0' && c <= '9':
-		goto yystate38
-	}
-
-yyrule1: // \0
-	{
-		return tEOF
-	}
-yyrule2: // \n
-	{
-		l.state = sInit
-		return tLinebreak
-		goto yystate0
-	}
-yyrule3: // [ \t]+
-	{
-		return tWhitespace
-	}
-yyrule4: // #[ \t]+
-	{
-		l.state = sComment
-		goto yystate0
-	}
-yyrule5: // #
-	{
-		return l.consumeComment()
-	}
-yyrule6: // HELP[\t ]+
-	{
-		l.state = sMeta1
-		return tHelp
-		goto yystate0
-	}
-yyrule7: // TYPE[\t ]+
-	{
-		l.state = sMeta1
-		return tType
-		goto yystate0
-	}
-yyrule8: // {M}({M}|{D})*
-	{
-		l.state = sMeta2
-		return tMName
-		goto yystate0
-	}
-yyrule9: // {C}*
-	{
-		l.state = sInit
-		return tText
-		goto yystate0
-	}
-yyrule10: // {M}({M}|{D})*
-	{
-		l.state = sValue
-		return tMName
-		goto yystate0
-	}
-yyrule11: // \{
-	{
-		l.state = sLabels
-		return tBraceOpen
-		goto yystate0
-	}
-yyrule12: // {L}({L}|{D})*
-	{
-		return tLName
-	}
-yyrule13: // \}
-	{
-		l.state = sValue
-		return tBraceClose
-		goto yystate0
-	}
-yyrule14: // =
-	{
-		l.state = sLValue
-		return tEqual
-		goto yystate0
-	}
-yyrule15: // ,
-	{
-		return tComma
-	}
-yyrule16: // \"(\\.|[^\\"])*\"
-	{
-		l.state = sLabels
-		return tLValue
-		goto yystate0
-	}
-yyrule17: // [^{ \t\n]+
-	{
-		l.state = sTimestamp
-		return tValue
-		goto yystate0
-	}
-yyrule18: // {D}+
-	{
-		return tTimestamp
-	}
-yyrule19: // \n
-	{
-		l.state = sInit
-		return tLinebreak
-		goto yystate0
-	}
-	panic("unreachable")
-
-	goto yyabort // silence unused label error
-
-yyabort: // no lexem recognized
-	// Workaround to gobble up comments that started with a HELP or TYPE
-	// prefix. We just consume all characters until we reach a newline.
-	// This saves us from adding disproportionate complexity to the parser.
-	if l.state == sComment {
-		return l.consumeComment()
-	}
-	return tInvalid
-}
-
-func (l *promlexer) consumeComment() token {
-	for c := l.cur(); ; c = l.next() {
-		switch c {
-		case 0:
-			return tEOF
-		case '\n':
-			l.state = sInit
-			return tComment
-		}
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/promparse.go 2.33.5+ds1-2/pkg/textparse/promparse.go
--- 2.31.2+ds1-1/pkg/textparse/promparse.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/promparse.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,426 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-//go:generate go get -u modernc.org/golex
-//go:generate golex -o=promlex.l.go promlex.l
-
-package textparse
-
-import (
-	"fmt"
-	"io"
-	"math"
-	"sort"
-	"strconv"
-	"strings"
-	"unicode/utf8"
-	"unsafe"
-
-	"github.com/pkg/errors"
-
-	"github.com/prometheus/prometheus/pkg/exemplar"
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/value"
-)
-
-type promlexer struct {
-	b     []byte
-	i     int
-	start int
-	err   error
-	state int
-}
-
-type token int
-
-const (
-	tInvalid   token = -1
-	tEOF       token = 0
-	tLinebreak token = iota
-	tWhitespace
-	tHelp
-	tType
-	tUnit
-	tEOFWord
-	tText
-	tComment
-	tBlank
-	tMName
-	tBraceOpen
-	tBraceClose
-	tLName
-	tLValue
-	tComma
-	tEqual
-	tTimestamp
-	tValue
-)
-
-func (t token) String() string {
-	switch t {
-	case tInvalid:
-		return "INVALID"
-	case tEOF:
-		return "EOF"
-	case tLinebreak:
-		return "LINEBREAK"
-	case tWhitespace:
-		return "WHITESPACE"
-	case tHelp:
-		return "HELP"
-	case tType:
-		return "TYPE"
-	case tUnit:
-		return "UNIT"
-	case tEOFWord:
-		return "EOFWORD"
-	case tText:
-		return "TEXT"
-	case tComment:
-		return "COMMENT"
-	case tBlank:
-		return "BLANK"
-	case tMName:
-		return "MNAME"
-	case tBraceOpen:
-		return "BOPEN"
-	case tBraceClose:
-		return "BCLOSE"
-	case tLName:
-		return "LNAME"
-	case tLValue:
-		return "LVALUE"
-	case tEqual:
-		return "EQUAL"
-	case tComma:
-		return "COMMA"
-	case tTimestamp:
-		return "TIMESTAMP"
-	case tValue:
-		return "VALUE"
-	}
-	return fmt.Sprintf("<invalid: %d>", t)
-}
-
-// buf returns the buffer of the current token.
-func (l *promlexer) buf() []byte {
-	return l.b[l.start:l.i]
-}
-
-func (l *promlexer) cur() byte {
-	return l.b[l.i]
-}
-
-// next advances the promlexer to the next character.
-func (l *promlexer) next() byte {
-	l.i++
-	if l.i >= len(l.b) {
-		l.err = io.EOF
-		return byte(tEOF)
-	}
-	// Lex struggles with null bytes. If we are in a label value or help string, where
-	// they are allowed, consume them here immediately.
-	for l.b[l.i] == 0 && (l.state == sLValue || l.state == sMeta2 || l.state == sComment) {
-		l.i++
-	}
-	return l.b[l.i]
-}
-
-func (l *promlexer) Error(es string) {
-	l.err = errors.New(es)
-}
-
-// PromParser parses samples from a byte slice of samples in the official
-// Prometheus text exposition format.
-type PromParser struct {
-	l       *promlexer
-	series  []byte
-	text    []byte
-	mtype   MetricType
-	val     float64
-	ts      int64
-	hasTS   bool
-	start   int
-	offsets []int
-}
-
-// NewPromParser returns a new parser of the byte slice.
-func NewPromParser(b []byte) Parser {
-	return &PromParser{l: &promlexer{b: append(b, '\n')}}
-}
-
-// Series returns the bytes of the series, the timestamp if set, and the value
-// of the current sample.
-func (p *PromParser) Series() ([]byte, *int64, float64) {
-	if p.hasTS {
-		return p.series, &p.ts, p.val
-	}
-	return p.series, nil, p.val
-}
-
-// Help returns the metric name and help text in the current entry.
-// Must only be called after Next returned a help entry.
-// The returned byte slices become invalid after the next call to Next.
-func (p *PromParser) Help() ([]byte, []byte) {
-	m := p.l.b[p.offsets[0]:p.offsets[1]]
-
-	// Replacer causes allocations. Replace only when necessary.
-	if strings.IndexByte(yoloString(p.text), byte('\\')) >= 0 {
-		return m, []byte(helpReplacer.Replace(string(p.text)))
-	}
-	return m, p.text
-}
-
-// Type returns the metric name and type in the current entry.
-// Must only be called after Next returned a type entry.
-// The returned byte slices become invalid after the next call to Next.
-func (p *PromParser) Type() ([]byte, MetricType) {
-	return p.l.b[p.offsets[0]:p.offsets[1]], p.mtype
-}
-
-// Unit returns the metric name and unit in the current entry.
-// Must only be called after Next returned a unit entry.
-// The returned byte slices become invalid after the next call to Next.
-func (p *PromParser) Unit() ([]byte, []byte) {
-	// The Prometheus format does not have units.
-	return nil, nil
-}
-
-// Comment returns the text of the current comment.
-// Must only be called after Next returned a comment entry.
-// The returned byte slice becomes invalid after the next call to Next.
-func (p *PromParser) Comment() []byte {
-	return p.text
-}
-
-// Metric writes the labels of the current sample into the passed labels.
-// It returns the string from which the metric was parsed.
-func (p *PromParser) Metric(l *labels.Labels) string {
-	// Allocate the full immutable string immediately, so we just
-	// have to create references on it below.
-	s := string(p.series)
-
-	*l = append(*l, labels.Label{
-		Name:  labels.MetricName,
-		Value: s[:p.offsets[0]-p.start],
-	})
-
-	for i := 1; i < len(p.offsets); i += 4 {
-		a := p.offsets[i] - p.start
-		b := p.offsets[i+1] - p.start
-		c := p.offsets[i+2] - p.start
-		d := p.offsets[i+3] - p.start
-
-		// Replacer causes allocations. Replace only when necessary.
-		if strings.IndexByte(s[c:d], byte('\\')) >= 0 {
-			*l = append(*l, labels.Label{Name: s[a:b], Value: lvalReplacer.Replace(s[c:d])})
-			continue
-		}
-		*l = append(*l, labels.Label{Name: s[a:b], Value: s[c:d]})
-	}
-
-	// Sort labels to maintain the sorted labels invariant.
-	sort.Sort(*l)
-
-	return s
-}
-
-// Exemplar writes the exemplar of the current sample into the passed
-// exemplar. It returns if an exemplar exists.
-func (p *PromParser) Exemplar(e *exemplar.Exemplar) bool {
-	return false
-}
-
-// nextToken returns the next token from the promlexer. It skips over tabs
-// and spaces.
-func (p *PromParser) nextToken() token {
-	for {
-		if tok := p.l.Lex(); tok != tWhitespace {
-			return tok
-		}
-	}
-}
-
-func parseError(exp string, got token) error {
-	return errors.Errorf("%s, got %q", exp, got)
-}
-
-// Next advances the parser to the next sample. It returns false if no
-// more samples were read or an error occurred.
-func (p *PromParser) Next() (Entry, error) {
-	var err error
-
-	p.start = p.l.i
-	p.offsets = p.offsets[:0]
-
-	switch t := p.nextToken(); t {
-	case tEOF:
-		return EntryInvalid, io.EOF
-	case tLinebreak:
-		// Allow full blank lines.
-		return p.Next()
-
-	case tHelp, tType:
-		switch t := p.nextToken(); t {
-		case tMName:
-			p.offsets = append(p.offsets, p.l.start, p.l.i)
-		default:
-			return EntryInvalid, parseError("expected metric name after HELP", t)
-		}
-		switch t := p.nextToken(); t {
-		case tText:
-			if len(p.l.buf()) > 1 {
-				p.text = p.l.buf()[1:]
-			} else {
-				p.text = []byte{}
-			}
-		default:
-			return EntryInvalid, parseError("expected text in HELP", t)
-		}
-		switch t {
-		case tType:
-			switch s := yoloString(p.text); s {
-			case "counter":
-				p.mtype = MetricTypeCounter
-			case "gauge":
-				p.mtype = MetricTypeGauge
-			case "histogram":
-				p.mtype = MetricTypeHistogram
-			case "summary":
-				p.mtype = MetricTypeSummary
-			case "untyped":
-				p.mtype = MetricTypeUnknown
-			default:
-				return EntryInvalid, errors.Errorf("invalid metric type %q", s)
-			}
-		case tHelp:
-			if !utf8.Valid(p.text) {
-				return EntryInvalid, errors.Errorf("help text is not a valid utf8 string")
-			}
-		}
-		if t := p.nextToken(); t != tLinebreak {
-			return EntryInvalid, parseError("linebreak expected after metadata", t)
-		}
-		switch t {
-		case tHelp:
-			return EntryHelp, nil
-		case tType:
-			return EntryType, nil
-		}
-	case tComment:
-		p.text = p.l.buf()
-		if t := p.nextToken(); t != tLinebreak {
-			return EntryInvalid, parseError("linebreak expected after comment", t)
-		}
-		return EntryComment, nil
-
-	case tMName:
-		p.offsets = append(p.offsets, p.l.i)
-		p.series = p.l.b[p.start:p.l.i]
-
-		t2 := p.nextToken()
-		if t2 == tBraceOpen {
-			if err := p.parseLVals(); err != nil {
-				return EntryInvalid, err
-			}
-			p.series = p.l.b[p.start:p.l.i]
-			t2 = p.nextToken()
-		}
-		if t2 != tValue {
-			return EntryInvalid, parseError("expected value after metric", t)
-		}
-		if p.val, err = parseFloat(yoloString(p.l.buf())); err != nil {
-			return EntryInvalid, err
-		}
-		// Ensure canonical NaN value.
-		if math.IsNaN(p.val) {
-			p.val = math.Float64frombits(value.NormalNaN)
-		}
-		p.hasTS = false
-		switch p.nextToken() {
-		case tLinebreak:
-			break
-		case tTimestamp:
-			p.hasTS = true
-			if p.ts, err = strconv.ParseInt(yoloString(p.l.buf()), 10, 64); err != nil {
-				return EntryInvalid, err
-			}
-			if t2 := p.nextToken(); t2 != tLinebreak {
-				return EntryInvalid, parseError("expected next entry after timestamp", t)
-			}
-		default:
-			return EntryInvalid, parseError("expected timestamp or new record", t)
-		}
-		return EntrySeries, nil
-
-	default:
-		err = errors.Errorf("%q is not a valid start token", t)
-	}
-	return EntryInvalid, err
-}
-
-func (p *PromParser) parseLVals() error {
-	t := p.nextToken()
-	for {
-		switch t {
-		case tBraceClose:
-			return nil
-		case tLName:
-		default:
-			return parseError("expected label name", t)
-		}
-		p.offsets = append(p.offsets, p.l.start, p.l.i)
-
-		if t := p.nextToken(); t != tEqual {
-			return parseError("expected equal", t)
-		}
-		if t := p.nextToken(); t != tLValue {
-			return parseError("expected label value", t)
-		}
-		if !utf8.Valid(p.l.buf()) {
-			return errors.Errorf("invalid UTF-8 label value")
-		}
-
-		// The promlexer ensures the value string is quoted. Strip first
-		// and last character.
-		p.offsets = append(p.offsets, p.l.start+1, p.l.i-1)
-
-		// Free trailing commas are allowed.
-		if t = p.nextToken(); t == tComma {
-			t = p.nextToken()
-		}
-	}
-}
-
-var lvalReplacer = strings.NewReplacer(
-	`\"`, "\"",
-	`\\`, "\\",
-	`\n`, "\n",
-)
-
-var helpReplacer = strings.NewReplacer(
-	`\\`, "\\",
-	`\n`, "\n",
-)
-
-func yoloString(b []byte) string {
-	return *((*string)(unsafe.Pointer(&b)))
-}
-
-func parseFloat(s string) (float64, error) {
-	// Keep to pre-Go 1.13 float formats.
-	if strings.ContainsAny(s, "pP_") {
-		return 0, fmt.Errorf("unsupported character in float")
-	}
-	return strconv.ParseFloat(s, 64)
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/promparse_test.go 2.33.5+ds1-2/pkg/textparse/promparse_test.go
--- 2.31.2+ds1-1/pkg/textparse/promparse_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/promparse_test.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,521 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package textparse
-
-import (
-	"bytes"
-	"compress/gzip"
-	"io"
-	"io/ioutil"
-	"os"
-	"testing"
-
-	"github.com/prometheus/common/expfmt"
-	"github.com/prometheus/common/model"
-	"github.com/stretchr/testify/require"
-
-	"github.com/prometheus/prometheus/pkg/labels"
-)
-
-func TestPromParse(t *testing.T) {
-	input := `# HELP go_gc_duration_seconds A summary of the GC invocation durations.
-# 	TYPE go_gc_duration_seconds summary
-go_gc_duration_seconds{quantile="0"} 4.9351e-05
-go_gc_duration_seconds{quantile="0.25",} 7.424100000000001e-05
-go_gc_duration_seconds{quantile="0.5",a="b"} 8.3835e-05
-go_gc_duration_seconds{quantile="0.8", a="b"} 8.3835e-05
-go_gc_duration_seconds{ quantile="0.9", a="b"} 8.3835e-05
-# Hrandom comment starting with prefix of HELP
-#
-wind_speed{A="2",c="3"} 12345
-# comment with escaped \n newline
-# comment with escaped \ escape character
-# HELP nohelp1
-# HELP nohelp2 
-go_gc_duration_seconds{ quantile="1.0", a="b" } 8.3835e-05
-go_gc_duration_seconds { quantile="1.0", a="b" } 8.3835e-05
-go_gc_duration_seconds { quantile= "1.0", a= "b", } 8.3835e-05
-go_gc_duration_seconds { quantile = "1.0", a = "b" } 8.3835e-05
-go_gc_duration_seconds_count 99
-some:aggregate:rate5m{a_b="c"}	1
-# HELP go_goroutines Number of goroutines that currently exist.
-# TYPE go_goroutines gauge
-go_goroutines 33  	123123
-_metric_starting_with_underscore 1
-testmetric{_label_starting_with_underscore="foo"} 1
-testmetric{label="\"bar\""} 1`
-	input += "\n# HELP metric foo\x00bar"
-	input += "\nnull_byte_metric{a=\"abc\x00\"} 1"
-
-	int64p := func(x int64) *int64 { return &x }
-
-	exp := []struct {
-		lset    labels.Labels
-		m       string
-		t       *int64
-		v       float64
-		typ     MetricType
-		help    string
-		comment string
-	}{
-		{
-			m:    "go_gc_duration_seconds",
-			help: "A summary of the GC invocation durations.",
-		}, {
-			m:   "go_gc_duration_seconds",
-			typ: MetricTypeSummary,
-		}, {
-			m:    `go_gc_duration_seconds{quantile="0"}`,
-			v:    4.9351e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0"),
-		}, {
-			m:    `go_gc_duration_seconds{quantile="0.25",}`,
-			v:    7.424100000000001e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.25"),
-		}, {
-			m:    `go_gc_duration_seconds{quantile="0.5",a="b"}`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.5", "a", "b"),
-		}, {
-			m:    `go_gc_duration_seconds{quantile="0.8", a="b"}`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.8", "a", "b"),
-		}, {
-			m:    `go_gc_duration_seconds{ quantile="0.9", a="b"}`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "0.9", "a", "b"),
-		}, {
-			comment: "# Hrandom comment starting with prefix of HELP",
-		}, {
-			comment: "#",
-		}, {
-			m:    `wind_speed{A="2",c="3"}`,
-			v:    12345,
-			lset: labels.FromStrings("A", "2", "__name__", "wind_speed", "c", "3"),
-		}, {
-			comment: "# comment with escaped \\n newline",
-		}, {
-			comment: "# comment with escaped \\ escape character",
-		}, {
-			m:    "nohelp1",
-			help: "",
-		}, {
-			m:    "nohelp2",
-			help: "",
-		}, {
-			m:    `go_gc_duration_seconds{ quantile="1.0", a="b" }`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
-		}, {
-			m:    `go_gc_duration_seconds { quantile="1.0", a="b" }`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
-		}, {
-			m:    `go_gc_duration_seconds { quantile= "1.0", a= "b", }`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
-		}, {
-			m:    `go_gc_duration_seconds { quantile = "1.0", a = "b" }`,
-			v:    8.3835e-05,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds", "quantile", "1.0", "a", "b"),
-		}, {
-			m:    `go_gc_duration_seconds_count`,
-			v:    99,
-			lset: labels.FromStrings("__name__", "go_gc_duration_seconds_count"),
-		}, {
-			m:    `some:aggregate:rate5m{a_b="c"}`,
-			v:    1,
-			lset: labels.FromStrings("__name__", "some:aggregate:rate5m", "a_b", "c"),
-		}, {
-			m:    "go_goroutines",
-			help: "Number of goroutines that currently exist.",
-		}, {
-			m:   "go_goroutines",
-			typ: MetricTypeGauge,
-		}, {
-			m:    `go_goroutines`,
-			v:    33,
-			t:    int64p(123123),
-			lset: labels.FromStrings("__name__", "go_goroutines"),
-		}, {
-			m:    "_metric_starting_with_underscore",
-			v:    1,
-			lset: labels.FromStrings("__name__", "_metric_starting_with_underscore"),
-		}, {
-			m:    "testmetric{_label_starting_with_underscore=\"foo\"}",
-			v:    1,
-			lset: labels.FromStrings("__name__", "testmetric", "_label_starting_with_underscore", "foo"),
-		}, {
-			m:    "testmetric{label=\"\\\"bar\\\"\"}",
-			v:    1,
-			lset: labels.FromStrings("__name__", "testmetric", "label", `"bar"`),
-		}, {
-			m:    "metric",
-			help: "foo\x00bar",
-		}, {
-			m:    "null_byte_metric{a=\"abc\x00\"}",
-			v:    1,
-			lset: labels.FromStrings("__name__", "null_byte_metric", "a", "abc\x00"),
-		},
-	}
-
-	p := NewPromParser([]byte(input))
-	i := 0
-
-	var res labels.Labels
-
-	for {
-		et, err := p.Next()
-		if err == io.EOF {
-			break
-		}
-		require.NoError(t, err)
-
-		switch et {
-		case EntrySeries:
-			m, ts, v := p.Series()
-
-			p.Metric(&res)
-
-			require.Equal(t, exp[i].m, string(m))
-			require.Equal(t, exp[i].t, ts)
-			require.Equal(t, exp[i].v, v)
-			require.Equal(t, exp[i].lset, res)
-			res = res[:0]
-
-		case EntryType:
-			m, typ := p.Type()
-			require.Equal(t, exp[i].m, string(m))
-			require.Equal(t, exp[i].typ, typ)
-
-		case EntryHelp:
-			m, h := p.Help()
-			require.Equal(t, exp[i].m, string(m))
-			require.Equal(t, exp[i].help, string(h))
-
-		case EntryComment:
-			require.Equal(t, exp[i].comment, string(p.Comment()))
-		}
-
-		i++
-	}
-	require.Equal(t, len(exp), i)
-}
-
-func TestPromParseErrors(t *testing.T) {
-	cases := []struct {
-		input string
-		err   string
-	}{
-		{
-			input: "a",
-			err:   "expected value after metric, got \"MNAME\"",
-		},
-		{
-			input: "a{b='c'} 1\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "a{b=\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "a{\xff=\"foo\"} 1\n",
-			err:   "expected label name, got \"INVALID\"",
-		},
-		{
-			input: "a{b=\"\xff\"} 1\n",
-			err:   "invalid UTF-8 label value",
-		},
-		{
-			input: "a true\n",
-			err:   "strconv.ParseFloat: parsing \"true\": invalid syntax",
-		},
-		{
-			input: "something_weird{problem=\"",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "empty_label_name{=\"\"} 0",
-			err:   "expected label name, got \"EQUAL\"",
-		},
-		{
-			input: "foo 1_2\n",
-			err:   "unsupported character in float",
-		},
-		{
-			input: "foo 0x1p-3\n",
-			err:   "unsupported character in float",
-		},
-		{
-			input: "foo 0x1P-3\n",
-			err:   "unsupported character in float",
-		},
-		{
-			input: "foo 0 1_2\n",
-			err:   "expected next entry after timestamp, got \"MNAME\"",
-		},
-		{
-			input: `{a="ok"} 1`,
-			err:   `"INVALID" is not a valid start token`,
-		},
-	}
-
-	for i, c := range cases {
-		p := NewPromParser([]byte(c.input))
-		var err error
-		for err == nil {
-			_, err = p.Next()
-		}
-		require.Error(t, err)
-		require.Equal(t, c.err, err.Error(), "test %d", i)
-	}
-}
-
-func TestPromNullByteHandling(t *testing.T) {
-	cases := []struct {
-		input string
-		err   string
-	}{
-		{
-			input: "null_byte_metric{a=\"abc\x00\"} 1",
-			err:   "",
-		},
-		{
-			input: "a{b=\"\x00ss\"} 1\n",
-			err:   "",
-		},
-		{
-			input: "a{b=\"\x00\"} 1\n",
-			err:   "",
-		},
-		{
-			input: "a{b=\"\x00\"} 1\n",
-			err:   "",
-		},
-		{
-			input: "a{b=\x00\"ssss\"} 1\n",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "a{b=\"\x00",
-			err:   "expected label value, got \"INVALID\"",
-		},
-		{
-			input: "a{b\x00=\"hiih\"}	1",
-			err: "expected equal, got \"INVALID\"",
-		},
-		{
-			input: "a\x00{b=\"ddd\"} 1",
-			err:   "expected value after metric, got \"MNAME\"",
-		},
-	}
-
-	for i, c := range cases {
-		p := NewPromParser([]byte(c.input))
-		var err error
-		for err == nil {
-			_, err = p.Next()
-		}
-
-		if c.err == "" {
-			require.Equal(t, io.EOF, err, "test %d", i)
-			continue
-		}
-
-		require.Error(t, err)
-		require.Equal(t, c.err, err.Error(), "test %d", i)
-	}
-}
-
-const (
-	promtestdataSampleCount = 410
-)
-
-func BenchmarkParse(b *testing.B) {
-	for parserName, parser := range map[string]func([]byte) Parser{
-		"prometheus":  NewPromParser,
-		"openmetrics": NewOpenMetricsParser,
-	} {
-		for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
-			f, err := os.Open(fn)
-			require.NoError(b, err)
-			defer f.Close()
-
-			buf, err := ioutil.ReadAll(f)
-			require.NoError(b, err)
-
-			b.Run(parserName+"/no-decode-metric/"+fn, func(b *testing.B) {
-				total := 0
-
-				b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
-				b.ReportAllocs()
-				b.ResetTimer()
-
-				for i := 0; i < b.N; i += promtestdataSampleCount {
-					p := parser(buf)
-
-				Outer:
-					for i < b.N {
-						t, err := p.Next()
-						switch t {
-						case EntryInvalid:
-							if err == io.EOF {
-								break Outer
-							}
-							b.Fatal(err)
-						case EntrySeries:
-							m, _, _ := p.Series()
-							total += len(m)
-							i++
-						}
-					}
-				}
-				_ = total
-			})
-			b.Run(parserName+"/decode-metric/"+fn, func(b *testing.B) {
-				total := 0
-
-				b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
-				b.ReportAllocs()
-				b.ResetTimer()
-
-				for i := 0; i < b.N; i += promtestdataSampleCount {
-					p := parser(buf)
-
-				Outer:
-					for i < b.N {
-						t, err := p.Next()
-						switch t {
-						case EntryInvalid:
-							if err == io.EOF {
-								break Outer
-							}
-							b.Fatal(err)
-						case EntrySeries:
-							m, _, _ := p.Series()
-
-							res := make(labels.Labels, 0, 5)
-							p.Metric(&res)
-
-							total += len(m)
-							i++
-						}
-					}
-				}
-				_ = total
-			})
-			b.Run(parserName+"/decode-metric-reuse/"+fn, func(b *testing.B) {
-				total := 0
-				res := make(labels.Labels, 0, 5)
-
-				b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
-				b.ReportAllocs()
-				b.ResetTimer()
-
-				for i := 0; i < b.N; i += promtestdataSampleCount {
-					p := parser(buf)
-
-				Outer:
-					for i < b.N {
-						t, err := p.Next()
-						switch t {
-						case EntryInvalid:
-							if err == io.EOF {
-								break Outer
-							}
-							b.Fatal(err)
-						case EntrySeries:
-							m, _, _ := p.Series()
-
-							p.Metric(&res)
-
-							total += len(m)
-							i++
-							res = res[:0]
-						}
-					}
-				}
-				_ = total
-			})
-			b.Run("expfmt-text/"+fn, func(b *testing.B) {
-				b.SetBytes(int64(len(buf) * (b.N / promtestdataSampleCount)))
-				b.ReportAllocs()
-				b.ResetTimer()
-
-				total := 0
-
-				for i := 0; i < b.N; i += promtestdataSampleCount {
-					var (
-						decSamples = make(model.Vector, 0, 50)
-					)
-					sdec := expfmt.SampleDecoder{
-						Dec: expfmt.NewDecoder(bytes.NewReader(buf), expfmt.FmtText),
-						Opts: &expfmt.DecodeOptions{
-							Timestamp: model.TimeFromUnixNano(0),
-						},
-					}
-
-					for {
-						if err = sdec.Decode(&decSamples); err != nil {
-							break
-						}
-						total += len(decSamples)
-						decSamples = decSamples[:0]
-					}
-				}
-				_ = total
-			})
-		}
-	}
-}
-func BenchmarkGzip(b *testing.B) {
-	for _, fn := range []string{"promtestdata.txt", "promtestdata.nometa.txt"} {
-		b.Run(fn, func(b *testing.B) {
-			f, err := os.Open(fn)
-			require.NoError(b, err)
-			defer f.Close()
-
-			var buf bytes.Buffer
-			gw := gzip.NewWriter(&buf)
-
-			n, err := io.Copy(gw, f)
-			require.NoError(b, err)
-			require.NoError(b, gw.Close())
-
-			gbuf, err := ioutil.ReadAll(&buf)
-			require.NoError(b, err)
-
-			k := b.N / promtestdataSampleCount
-
-			b.ReportAllocs()
-			b.SetBytes(int64(k) * int64(n))
-			b.ResetTimer()
-
-			total := 0
-
-			for i := 0; i < k; i++ {
-				gr, err := gzip.NewReader(bytes.NewReader(gbuf))
-				require.NoError(b, err)
-
-				d, err := ioutil.ReadAll(gr)
-				require.NoError(b, err)
-				require.NoError(b, gr.Close())
-
-				total += len(d)
-			}
-			_ = total
-		})
-	}
-}
diff -pruN 2.31.2+ds1-1/pkg/textparse/promtestdata.nometa.txt 2.33.5+ds1-2/pkg/textparse/promtestdata.nometa.txt
--- 2.31.2+ds1-1/pkg/textparse/promtestdata.nometa.txt	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/promtestdata.nometa.txt	1970-01-01 00:00:00.000000000 +0000
@@ -1,411 +0,0 @@
-go_gc_duration_seconds{quantile="0"} 4.9351e-05
-go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
-go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
-go_gc_duration_seconds{quantile="0.75"} 0.000106744
-go_gc_duration_seconds{quantile="1"} 0.002072195
-go_gc_duration_seconds_sum 0.012139815
-go_gc_duration_seconds_count 99
-go_goroutines 33
-go_memstats_alloc_bytes 1.7518624e+07
-go_memstats_alloc_bytes_total 8.3062296e+08
-go_memstats_buck_hash_sys_bytes 1.494637e+06
-go_memstats_frees_total 4.65658e+06
-go_memstats_gc_sys_bytes 1.107968e+06
-go_memstats_heap_alloc_bytes 1.7518624e+07
-go_memstats_heap_idle_bytes 6.668288e+06
-go_memstats_heap_inuse_bytes 1.8956288e+07
-go_memstats_heap_objects 72755
-go_memstats_heap_released_bytes_total 0
-go_memstats_heap_sys_bytes 2.5624576e+07
-go_memstats_last_gc_time_seconds 1.4843955586166437e+09
-go_memstats_lookups_total 2089
-go_memstats_mallocs_total 4.729335e+06
-go_memstats_mcache_inuse_bytes 9600
-go_memstats_mcache_sys_bytes 16384
-go_memstats_mspan_inuse_bytes 211520
-go_memstats_mspan_sys_bytes 245760
-go_memstats_next_gc_bytes 2.033527e+07
-go_memstats_other_sys_bytes 2.077323e+06
-go_memstats_stack_inuse_bytes 1.6384e+06
-go_memstats_stack_sys_bytes 1.6384e+06
-go_memstats_sys_bytes 3.2205048e+07
-http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="alerts"} 0
-http_request_duration_microseconds_count{handler="alerts"} 0
-http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="config"} 0
-http_request_duration_microseconds_count{handler="config"} 0
-http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="consoles"} 0
-http_request_duration_microseconds_count{handler="consoles"} 0
-http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="drop_series"} 0
-http_request_duration_microseconds_count{handler="drop_series"} 0
-http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="federate"} 0
-http_request_duration_microseconds_count{handler="federate"} 0
-http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="flags"} 0
-http_request_duration_microseconds_count{handler="flags"} 0
-http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
-http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
-http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
-http_request_duration_microseconds_sum{handler="graph"} 5803.93
-http_request_duration_microseconds_count{handler="graph"} 3
-http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="heap"} 0
-http_request_duration_microseconds_count{handler="heap"} 0
-http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
-http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
-http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
-http_request_duration_microseconds_sum{handler="label_values"} 3995.574
-http_request_duration_microseconds_count{handler="label_values"} 3
-http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="options"} 0
-http_request_duration_microseconds_count{handler="options"} 0
-http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
-http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
-http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
-http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
-http_request_duration_microseconds_count{handler="prometheus"} 462
-http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
-http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
-http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
-http_request_duration_microseconds_sum{handler="query"} 26074.11
-http_request_duration_microseconds_count{handler="query"} 6
-http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="query_range"} 0
-http_request_duration_microseconds_count{handler="query_range"} 0
-http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="rules"} 0
-http_request_duration_microseconds_count{handler="rules"} 0
-http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="series"} 0
-http_request_duration_microseconds_count{handler="series"} 0
-http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
-http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
-http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
-http_request_duration_microseconds_sum{handler="static"} 6458.621
-http_request_duration_microseconds_count{handler="static"} 3
-http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="status"} 0
-http_request_duration_microseconds_count{handler="status"} 0
-http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="targets"} 0
-http_request_duration_microseconds_count{handler="targets"} 0
-http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="version"} 0
-http_request_duration_microseconds_count{handler="version"} 0
-http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
-http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
-http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="alerts"} 0
-http_request_size_bytes_count{handler="alerts"} 0
-http_request_size_bytes{handler="config",quantile="0.5"} NaN
-http_request_size_bytes{handler="config",quantile="0.9"} NaN
-http_request_size_bytes{handler="config",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="config"} 0
-http_request_size_bytes_count{handler="config"} 0
-http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
-http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
-http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="consoles"} 0
-http_request_size_bytes_count{handler="consoles"} 0
-http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
-http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
-http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="drop_series"} 0
-http_request_size_bytes_count{handler="drop_series"} 0
-http_request_size_bytes{handler="federate",quantile="0.5"} NaN
-http_request_size_bytes{handler="federate",quantile="0.9"} NaN
-http_request_size_bytes{handler="federate",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="federate"} 0
-http_request_size_bytes_count{handler="federate"} 0
-http_request_size_bytes{handler="flags",quantile="0.5"} NaN
-http_request_size_bytes{handler="flags",quantile="0.9"} NaN
-http_request_size_bytes{handler="flags",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="flags"} 0
-http_request_size_bytes_count{handler="flags"} 0
-http_request_size_bytes{handler="graph",quantile="0.5"} 367
-http_request_size_bytes{handler="graph",quantile="0.9"} 389
-http_request_size_bytes{handler="graph",quantile="0.99"} 389
-http_request_size_bytes_sum{handler="graph"} 1145
-http_request_size_bytes_count{handler="graph"} 3
-http_request_size_bytes{handler="heap",quantile="0.5"} NaN
-http_request_size_bytes{handler="heap",quantile="0.9"} NaN
-http_request_size_bytes{handler="heap",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="heap"} 0
-http_request_size_bytes_count{handler="heap"} 0
-http_request_size_bytes{handler="label_values",quantile="0.5"} 416
-http_request_size_bytes{handler="label_values",quantile="0.9"} 416
-http_request_size_bytes{handler="label_values",quantile="0.99"} 416
-http_request_size_bytes_sum{handler="label_values"} 1248
-http_request_size_bytes_count{handler="label_values"} 3
-http_request_size_bytes{handler="options",quantile="0.5"} NaN
-http_request_size_bytes{handler="options",quantile="0.9"} NaN
-http_request_size_bytes{handler="options",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="options"} 0
-http_request_size_bytes_count{handler="options"} 0
-http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
-http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
-http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
-http_request_size_bytes_sum{handler="prometheus"} 109956
-http_request_size_bytes_count{handler="prometheus"} 462
-http_request_size_bytes{handler="query",quantile="0.5"} 531
-http_request_size_bytes{handler="query",quantile="0.9"} 531
-http_request_size_bytes{handler="query",quantile="0.99"} 531
-http_request_size_bytes_sum{handler="query"} 3186
-http_request_size_bytes_count{handler="query"} 6
-http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
-http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
-http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="query_range"} 0
-http_request_size_bytes_count{handler="query_range"} 0
-http_request_size_bytes{handler="rules",quantile="0.5"} NaN
-http_request_size_bytes{handler="rules",quantile="0.9"} NaN
-http_request_size_bytes{handler="rules",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="rules"} 0
-http_request_size_bytes_count{handler="rules"} 0
-http_request_size_bytes{handler="series",quantile="0.5"} NaN
-http_request_size_bytes{handler="series",quantile="0.9"} NaN
-http_request_size_bytes{handler="series",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="series"} 0
-http_request_size_bytes_count{handler="series"} 0
-http_request_size_bytes{handler="static",quantile="0.5"} 379
-http_request_size_bytes{handler="static",quantile="0.9"} 379
-http_request_size_bytes{handler="static",quantile="0.99"} 379
-http_request_size_bytes_sum{handler="static"} 1137
-http_request_size_bytes_count{handler="static"} 3
-http_request_size_bytes{handler="status",quantile="0.5"} NaN
-http_request_size_bytes{handler="status",quantile="0.9"} NaN
-http_request_size_bytes{handler="status",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="status"} 0
-http_request_size_bytes_count{handler="status"} 0
-http_request_size_bytes{handler="targets",quantile="0.5"} NaN
-http_request_size_bytes{handler="targets",quantile="0.9"} NaN
-http_request_size_bytes{handler="targets",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="targets"} 0
-http_request_size_bytes_count{handler="targets"} 0
-http_request_size_bytes{handler="version",quantile="0.5"} NaN
-http_request_size_bytes{handler="version",quantile="0.9"} NaN
-http_request_size_bytes{handler="version",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="version"} 0
-http_request_size_bytes_count{handler="version"} 0
-http_requests_total{code="200",handler="graph",method="get"} 3
-http_requests_total{code="200",handler="label_values",method="get"} 3
-http_requests_total{code="200",handler="prometheus",method="get"} 462
-http_requests_total{code="200",handler="query",method="get"} 6
-http_requests_total{code="200",handler="static",method="get"} 3
-http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
-http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
-http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="alerts"} 0
-http_response_size_bytes_count{handler="alerts"} 0
-http_response_size_bytes{handler="config",quantile="0.5"} NaN
-http_response_size_bytes{handler="config",quantile="0.9"} NaN
-http_response_size_bytes{handler="config",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="config"} 0
-http_response_size_bytes_count{handler="config"} 0
-http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
-http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
-http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="consoles"} 0
-http_response_size_bytes_count{handler="consoles"} 0
-http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
-http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
-http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="drop_series"} 0
-http_response_size_bytes_count{handler="drop_series"} 0
-http_response_size_bytes{handler="federate",quantile="0.5"} NaN
-http_response_size_bytes{handler="federate",quantile="0.9"} NaN
-http_response_size_bytes{handler="federate",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="federate"} 0
-http_response_size_bytes_count{handler="federate"} 0
-http_response_size_bytes{handler="flags",quantile="0.5"} NaN
-http_response_size_bytes{handler="flags",quantile="0.9"} NaN
-http_response_size_bytes{handler="flags",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="flags"} 0
-http_response_size_bytes_count{handler="flags"} 0
-http_response_size_bytes{handler="graph",quantile="0.5"} 3619
-http_response_size_bytes{handler="graph",quantile="0.9"} 3619
-http_response_size_bytes{handler="graph",quantile="0.99"} 3619
-http_response_size_bytes_sum{handler="graph"} 10857
-http_response_size_bytes_count{handler="graph"} 3
-http_response_size_bytes{handler="heap",quantile="0.5"} NaN
-http_response_size_bytes{handler="heap",quantile="0.9"} NaN
-http_response_size_bytes{handler="heap",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="heap"} 0
-http_response_size_bytes_count{handler="heap"} 0
-http_response_size_bytes{handler="label_values",quantile="0.5"} 642
-http_response_size_bytes{handler="label_values",quantile="0.9"} 642
-http_response_size_bytes{handler="label_values",quantile="0.99"} 642
-http_response_size_bytes_sum{handler="label_values"} 1926
-http_response_size_bytes_count{handler="label_values"} 3
-http_response_size_bytes{handler="options",quantile="0.5"} NaN
-http_response_size_bytes{handler="options",quantile="0.9"} NaN
-http_response_size_bytes{handler="options",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="options"} 0
-http_response_size_bytes_count{handler="options"} 0
-http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
-http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
-http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
-http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
-http_response_size_bytes_count{handler="prometheus"} 462
-http_response_size_bytes{handler="query",quantile="0.5"} 776
-http_response_size_bytes{handler="query",quantile="0.9"} 781
-http_response_size_bytes{handler="query",quantile="0.99"} 781
-http_response_size_bytes_sum{handler="query"} 4656
-http_response_size_bytes_count{handler="query"} 6
-http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
-http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
-http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="query_range"} 0
-http_response_size_bytes_count{handler="query_range"} 0
-http_response_size_bytes{handler="rules",quantile="0.5"} NaN
-http_response_size_bytes{handler="rules",quantile="0.9"} NaN
-http_response_size_bytes{handler="rules",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="rules"} 0
-http_response_size_bytes_count{handler="rules"} 0
-http_response_size_bytes{handler="series",quantile="0.5"} NaN
-http_response_size_bytes{handler="series",quantile="0.9"} NaN
-http_response_size_bytes{handler="series",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="series"} 0
-http_response_size_bytes_count{handler="series"} 0
-http_response_size_bytes{handler="static",quantile="0.5"} 6316
-http_response_size_bytes{handler="static",quantile="0.9"} 6316
-http_response_size_bytes{handler="static",quantile="0.99"} 6316
-http_response_size_bytes_sum{handler="static"} 18948
-http_response_size_bytes_count{handler="static"} 3
-http_response_size_bytes{handler="status",quantile="0.5"} NaN
-http_response_size_bytes{handler="status",quantile="0.9"} NaN
-http_response_size_bytes{handler="status",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="status"} 0
-http_response_size_bytes_count{handler="status"} 0
-http_response_size_bytes{handler="targets",quantile="0.5"} NaN
-http_response_size_bytes{handler="targets",quantile="0.9"} NaN
-http_response_size_bytes{handler="targets",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="targets"} 0
-http_response_size_bytes_count{handler="targets"} 0
-http_response_size_bytes{handler="version",quantile="0.5"} NaN
-http_response_size_bytes{handler="version",quantile="0.9"} NaN
-http_response_size_bytes{handler="version",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="version"} 0
-http_response_size_bytes_count{handler="version"} 0
-prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
-prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
-prometheus_config_last_reload_successful 1
-prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds_count 1
-prometheus_evaluator_iterations_skipped_total 0
-prometheus_notifications_dropped_total 0
-prometheus_notifications_queue_capacity 10000
-prometheus_notifications_queue_length 0
-prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
-prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
-prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_azure_refresh_duration_seconds_sum 0
-prometheus_sd_azure_refresh_duration_seconds_count 0
-prometheus_sd_azure_refresh_failures_total 0
-prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
-prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
-prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
-prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
-prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
-prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
-prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
-prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
-prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
-prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
-prometheus_sd_consul_rpc_failures_total 0
-prometheus_sd_dns_lookup_failures_total 0
-prometheus_sd_dns_lookups_total 0
-prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_ec2_refresh_duration_seconds_sum 0
-prometheus_sd_ec2_refresh_duration_seconds_count 0
-prometheus_sd_ec2_refresh_failures_total 0
-prometheus_sd_file_read_errors_total 0
-prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_file_scan_duration_seconds_sum 0
-prometheus_sd_file_scan_duration_seconds_count 0
-prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
-prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
-prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
-prometheus_sd_gce_refresh_duration_sum 0
-prometheus_sd_gce_refresh_duration_count 0
-prometheus_sd_gce_refresh_failures_total 0
-prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_marathon_refresh_duration_seconds_sum 0
-prometheus_sd_marathon_refresh_duration_seconds_count 0
-prometheus_sd_marathon_refresh_failures_total 0
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
-prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
-prometheus_target_interval_length_seconds_count{interval="50ms"} 685
-prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
-prometheus_target_skipped_scrapes_total 0
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
-prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
-prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
-prometheus_treecache_watcher_goroutines 0
-prometheus_treecache_zookeeper_failures_total 0
-# EOF
diff -pruN 2.31.2+ds1-1/pkg/textparse/promtestdata.txt 2.33.5+ds1-2/pkg/textparse/promtestdata.txt
--- 2.31.2+ds1-1/pkg/textparse/promtestdata.txt	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/promtestdata.txt	1970-01-01 00:00:00.000000000 +0000
@@ -1,529 +0,0 @@
-# HELP go_gc_duration_seconds A summary of the GC invocation durations.
-# TYPE go_gc_duration_seconds summary
-go_gc_duration_seconds{quantile="0"} 4.9351e-05
-go_gc_duration_seconds{quantile="0.25"} 7.424100000000001e-05
-go_gc_duration_seconds{quantile="0.5"} 8.3835e-05
-go_gc_duration_seconds{quantile="0.75"} 0.000106744
-go_gc_duration_seconds{quantile="1"} 0.002072195
-go_gc_duration_seconds_sum 0.012139815
-go_gc_duration_seconds_count 99
-# HELP go_goroutines Number of goroutines that currently exist.
-# TYPE go_goroutines gauge
-go_goroutines 33
-# HELP go_memstats_alloc_bytes Number of bytes allocated and still in use.
-# TYPE go_memstats_alloc_bytes gauge
-go_memstats_alloc_bytes 1.7518624e+07
-# HELP go_memstats_alloc_bytes_total Total number of bytes allocated, even if freed.
-# TYPE go_memstats_alloc_bytes_total counter
-go_memstats_alloc_bytes_total 8.3062296e+08
-# HELP go_memstats_buck_hash_sys_bytes Number of bytes used by the profiling bucket hash table.
-# TYPE go_memstats_buck_hash_sys_bytes gauge
-go_memstats_buck_hash_sys_bytes 1.494637e+06
-# HELP go_memstats_frees_total Total number of frees.
-# TYPE go_memstats_frees_total counter
-go_memstats_frees_total 4.65658e+06
-# HELP go_memstats_gc_sys_bytes Number of bytes used for garbage collection system metadata.
-# TYPE go_memstats_gc_sys_bytes gauge
-go_memstats_gc_sys_bytes 1.107968e+06
-# HELP go_memstats_heap_alloc_bytes Number of heap bytes allocated and still in use.
-# TYPE go_memstats_heap_alloc_bytes gauge
-go_memstats_heap_alloc_bytes 1.7518624e+07
-# HELP go_memstats_heap_idle_bytes Number of heap bytes waiting to be used.
-# TYPE go_memstats_heap_idle_bytes gauge
-go_memstats_heap_idle_bytes 6.668288e+06
-# HELP go_memstats_heap_inuse_bytes Number of heap bytes that are in use.
-# TYPE go_memstats_heap_inuse_bytes gauge
-go_memstats_heap_inuse_bytes 1.8956288e+07
-# HELP go_memstats_heap_objects Number of allocated objects.
-# TYPE go_memstats_heap_objects gauge
-go_memstats_heap_objects 72755
-# HELP go_memstats_heap_released_bytes_total Total number of heap bytes released to OS.
-# TYPE go_memstats_heap_released_bytes_total counter
-go_memstats_heap_released_bytes_total 0
-# HELP go_memstats_heap_sys_bytes Number of heap bytes obtained from system.
-# TYPE go_memstats_heap_sys_bytes gauge
-go_memstats_heap_sys_bytes 2.5624576e+07
-# HELP go_memstats_last_gc_time_seconds Number of seconds since 1970 of last garbage collection.
-# TYPE go_memstats_last_gc_time_seconds gauge
-go_memstats_last_gc_time_seconds 1.4843955586166437e+09
-# HELP go_memstats_lookups_total Total number of pointer lookups.
-# TYPE go_memstats_lookups_total counter
-go_memstats_lookups_total 2089
-# HELP go_memstats_mallocs_total Total number of mallocs.
-# TYPE go_memstats_mallocs_total counter
-go_memstats_mallocs_total 4.729335e+06
-# HELP go_memstats_mcache_inuse_bytes Number of bytes in use by mcache structures.
-# TYPE go_memstats_mcache_inuse_bytes gauge
-go_memstats_mcache_inuse_bytes 9600
-# HELP go_memstats_mcache_sys_bytes Number of bytes used for mcache structures obtained from system.
-# TYPE go_memstats_mcache_sys_bytes gauge
-go_memstats_mcache_sys_bytes 16384
-# HELP go_memstats_mspan_inuse_bytes Number of bytes in use by mspan structures.
-# TYPE go_memstats_mspan_inuse_bytes gauge
-go_memstats_mspan_inuse_bytes 211520
-# HELP go_memstats_mspan_sys_bytes Number of bytes used for mspan structures obtained from system.
-# TYPE go_memstats_mspan_sys_bytes gauge
-go_memstats_mspan_sys_bytes 245760
-# HELP go_memstats_next_gc_bytes Number of heap bytes when next garbage collection will take place.
-# TYPE go_memstats_next_gc_bytes gauge
-go_memstats_next_gc_bytes 2.033527e+07
-# HELP go_memstats_other_sys_bytes Number of bytes used for other system allocations.
-# TYPE go_memstats_other_sys_bytes gauge
-go_memstats_other_sys_bytes 2.077323e+06
-# HELP go_memstats_stack_inuse_bytes Number of bytes in use by the stack allocator.
-# TYPE go_memstats_stack_inuse_bytes gauge
-go_memstats_stack_inuse_bytes 1.6384e+06
-# HELP go_memstats_stack_sys_bytes Number of bytes obtained from system for stack allocator.
-# TYPE go_memstats_stack_sys_bytes gauge
-go_memstats_stack_sys_bytes 1.6384e+06
-# HELP go_memstats_sys_bytes Number of bytes obtained by system. Sum of all system allocations.
-# TYPE go_memstats_sys_bytes gauge
-go_memstats_sys_bytes 3.2205048e+07
-# HELP http_request_duration_microseconds The HTTP request latencies in microseconds.
-# TYPE http_request_duration_microseconds summary
-http_request_duration_microseconds{handler="alerts",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="alerts",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="alerts",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="alerts"} 0
-http_request_duration_microseconds_count{handler="alerts"} 0
-http_request_duration_microseconds{handler="config",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="config",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="config",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="config"} 0
-http_request_duration_microseconds_count{handler="config"} 0
-http_request_duration_microseconds{handler="consoles",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="consoles",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="consoles",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="consoles"} 0
-http_request_duration_microseconds_count{handler="consoles"} 0
-http_request_duration_microseconds{handler="drop_series",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="drop_series",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="drop_series",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="drop_series"} 0
-http_request_duration_microseconds_count{handler="drop_series"} 0
-http_request_duration_microseconds{handler="federate",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="federate",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="federate",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="federate"} 0
-http_request_duration_microseconds_count{handler="federate"} 0
-http_request_duration_microseconds{handler="flags",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="flags",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="flags",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="flags"} 0
-http_request_duration_microseconds_count{handler="flags"} 0
-http_request_duration_microseconds{handler="graph",quantile="0.5"} 771.655
-http_request_duration_microseconds{handler="graph",quantile="0.9"} 1761.823
-http_request_duration_microseconds{handler="graph",quantile="0.99"} 1761.823
-http_request_duration_microseconds_sum{handler="graph"} 5803.93
-http_request_duration_microseconds_count{handler="graph"} 3
-http_request_duration_microseconds{handler="heap",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="heap",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="heap",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="heap"} 0
-http_request_duration_microseconds_count{handler="heap"} 0
-http_request_duration_microseconds{handler="label_values",quantile="0.5"} 325.401
-http_request_duration_microseconds{handler="label_values",quantile="0.9"} 414.708
-http_request_duration_microseconds{handler="label_values",quantile="0.99"} 414.708
-http_request_duration_microseconds_sum{handler="label_values"} 3995.574
-http_request_duration_microseconds_count{handler="label_values"} 3
-http_request_duration_microseconds{handler="options",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="options",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="options",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="options"} 0
-http_request_duration_microseconds_count{handler="options"} 0
-http_request_duration_microseconds{handler="prometheus",quantile="0.5"} 1351.859
-http_request_duration_microseconds{handler="prometheus",quantile="0.9"} 1714.035
-http_request_duration_microseconds{handler="prometheus",quantile="0.99"} 2833.523
-http_request_duration_microseconds_sum{handler="prometheus"} 661851.54
-http_request_duration_microseconds_count{handler="prometheus"} 462
-http_request_duration_microseconds{handler="query",quantile="0.5"} 3885.448
-http_request_duration_microseconds{handler="query",quantile="0.9"} 4390.558
-http_request_duration_microseconds{handler="query",quantile="0.99"} 4390.558
-http_request_duration_microseconds_sum{handler="query"} 26074.11
-http_request_duration_microseconds_count{handler="query"} 6
-http_request_duration_microseconds{handler="query_range",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="query_range",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="query_range",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="query_range"} 0
-http_request_duration_microseconds_count{handler="query_range"} 0
-http_request_duration_microseconds{handler="rules",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="rules",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="rules",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="rules"} 0
-http_request_duration_microseconds_count{handler="rules"} 0
-http_request_duration_microseconds{handler="series",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="series",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="series",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="series"} 0
-http_request_duration_microseconds_count{handler="series"} 0
-http_request_duration_microseconds{handler="static",quantile="0.5"} 212.311
-http_request_duration_microseconds{handler="static",quantile="0.9"} 265.174
-http_request_duration_microseconds{handler="static",quantile="0.99"} 265.174
-http_request_duration_microseconds_sum{handler="static"} 6458.621
-http_request_duration_microseconds_count{handler="static"} 3
-http_request_duration_microseconds{handler="status",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="status",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="status",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="status"} 0
-http_request_duration_microseconds_count{handler="status"} 0
-http_request_duration_microseconds{handler="targets",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="targets",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="targets",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="targets"} 0
-http_request_duration_microseconds_count{handler="targets"} 0
-http_request_duration_microseconds{handler="version",quantile="0.5"} NaN
-http_request_duration_microseconds{handler="version",quantile="0.9"} NaN
-http_request_duration_microseconds{handler="version",quantile="0.99"} NaN
-http_request_duration_microseconds_sum{handler="version"} 0
-http_request_duration_microseconds_count{handler="version"} 0
-# HELP http_request_size_bytes The HTTP request sizes in bytes.
-# TYPE http_request_size_bytes summary
-http_request_size_bytes{handler="alerts",quantile="0.5"} NaN
-http_request_size_bytes{handler="alerts",quantile="0.9"} NaN
-http_request_size_bytes{handler="alerts",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="alerts"} 0
-http_request_size_bytes_count{handler="alerts"} 0
-http_request_size_bytes{handler="config",quantile="0.5"} NaN
-http_request_size_bytes{handler="config",quantile="0.9"} NaN
-http_request_size_bytes{handler="config",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="config"} 0
-http_request_size_bytes_count{handler="config"} 0
-http_request_size_bytes{handler="consoles",quantile="0.5"} NaN
-http_request_size_bytes{handler="consoles",quantile="0.9"} NaN
-http_request_size_bytes{handler="consoles",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="consoles"} 0
-http_request_size_bytes_count{handler="consoles"} 0
-http_request_size_bytes{handler="drop_series",quantile="0.5"} NaN
-http_request_size_bytes{handler="drop_series",quantile="0.9"} NaN
-http_request_size_bytes{handler="drop_series",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="drop_series"} 0
-http_request_size_bytes_count{handler="drop_series"} 0
-http_request_size_bytes{handler="federate",quantile="0.5"} NaN
-http_request_size_bytes{handler="federate",quantile="0.9"} NaN
-http_request_size_bytes{handler="federate",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="federate"} 0
-http_request_size_bytes_count{handler="federate"} 0
-http_request_size_bytes{handler="flags",quantile="0.5"} NaN
-http_request_size_bytes{handler="flags",quantile="0.9"} NaN
-http_request_size_bytes{handler="flags",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="flags"} 0
-http_request_size_bytes_count{handler="flags"} 0
-http_request_size_bytes{handler="graph",quantile="0.5"} 367
-http_request_size_bytes{handler="graph",quantile="0.9"} 389
-http_request_size_bytes{handler="graph",quantile="0.99"} 389
-http_request_size_bytes_sum{handler="graph"} 1145
-http_request_size_bytes_count{handler="graph"} 3
-http_request_size_bytes{handler="heap",quantile="0.5"} NaN
-http_request_size_bytes{handler="heap",quantile="0.9"} NaN
-http_request_size_bytes{handler="heap",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="heap"} 0
-http_request_size_bytes_count{handler="heap"} 0
-http_request_size_bytes{handler="label_values",quantile="0.5"} 416
-http_request_size_bytes{handler="label_values",quantile="0.9"} 416
-http_request_size_bytes{handler="label_values",quantile="0.99"} 416
-http_request_size_bytes_sum{handler="label_values"} 1248
-http_request_size_bytes_count{handler="label_values"} 3
-http_request_size_bytes{handler="options",quantile="0.5"} NaN
-http_request_size_bytes{handler="options",quantile="0.9"} NaN
-http_request_size_bytes{handler="options",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="options"} 0
-http_request_size_bytes_count{handler="options"} 0
-http_request_size_bytes{handler="prometheus",quantile="0.5"} 238
-http_request_size_bytes{handler="prometheus",quantile="0.9"} 238
-http_request_size_bytes{handler="prometheus",quantile="0.99"} 238
-http_request_size_bytes_sum{handler="prometheus"} 109956
-http_request_size_bytes_count{handler="prometheus"} 462
-http_request_size_bytes{handler="query",quantile="0.5"} 531
-http_request_size_bytes{handler="query",quantile="0.9"} 531
-http_request_size_bytes{handler="query",quantile="0.99"} 531
-http_request_size_bytes_sum{handler="query"} 3186
-http_request_size_bytes_count{handler="query"} 6
-http_request_size_bytes{handler="query_range",quantile="0.5"} NaN
-http_request_size_bytes{handler="query_range",quantile="0.9"} NaN
-http_request_size_bytes{handler="query_range",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="query_range"} 0
-http_request_size_bytes_count{handler="query_range"} 0
-http_request_size_bytes{handler="rules",quantile="0.5"} NaN
-http_request_size_bytes{handler="rules",quantile="0.9"} NaN
-http_request_size_bytes{handler="rules",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="rules"} 0
-http_request_size_bytes_count{handler="rules"} 0
-http_request_size_bytes{handler="series",quantile="0.5"} NaN
-http_request_size_bytes{handler="series",quantile="0.9"} NaN
-http_request_size_bytes{handler="series",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="series"} 0
-http_request_size_bytes_count{handler="series"} 0
-http_request_size_bytes{handler="static",quantile="0.5"} 379
-http_request_size_bytes{handler="static",quantile="0.9"} 379
-http_request_size_bytes{handler="static",quantile="0.99"} 379
-http_request_size_bytes_sum{handler="static"} 1137
-http_request_size_bytes_count{handler="static"} 3
-http_request_size_bytes{handler="status",quantile="0.5"} NaN
-http_request_size_bytes{handler="status",quantile="0.9"} NaN
-http_request_size_bytes{handler="status",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="status"} 0
-http_request_size_bytes_count{handler="status"} 0
-http_request_size_bytes{handler="targets",quantile="0.5"} NaN
-http_request_size_bytes{handler="targets",quantile="0.9"} NaN
-http_request_size_bytes{handler="targets",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="targets"} 0
-http_request_size_bytes_count{handler="targets"} 0
-http_request_size_bytes{handler="version",quantile="0.5"} NaN
-http_request_size_bytes{handler="version",quantile="0.9"} NaN
-http_request_size_bytes{handler="version",quantile="0.99"} NaN
-http_request_size_bytes_sum{handler="version"} 0
-http_request_size_bytes_count{handler="version"} 0
-# HELP http_requests_total Total number of HTTP requests made.
-# TYPE http_requests_total counter
-http_requests_total{code="200",handler="graph",method="get"} 3
-http_requests_total{code="200",handler="label_values",method="get"} 3
-http_requests_total{code="200",handler="prometheus",method="get"} 462
-http_requests_total{code="200",handler="query",method="get"} 6
-http_requests_total{code="200",handler="static",method="get"} 3
-# HELP http_response_size_bytes The HTTP response sizes in bytes.
-# TYPE http_response_size_bytes summary
-http_response_size_bytes{handler="alerts",quantile="0.5"} NaN
-http_response_size_bytes{handler="alerts",quantile="0.9"} NaN
-http_response_size_bytes{handler="alerts",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="alerts"} 0
-http_response_size_bytes_count{handler="alerts"} 0
-http_response_size_bytes{handler="config",quantile="0.5"} NaN
-http_response_size_bytes{handler="config",quantile="0.9"} NaN
-http_response_size_bytes{handler="config",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="config"} 0
-http_response_size_bytes_count{handler="config"} 0
-http_response_size_bytes{handler="consoles",quantile="0.5"} NaN
-http_response_size_bytes{handler="consoles",quantile="0.9"} NaN
-http_response_size_bytes{handler="consoles",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="consoles"} 0
-http_response_size_bytes_count{handler="consoles"} 0
-http_response_size_bytes{handler="drop_series",quantile="0.5"} NaN
-http_response_size_bytes{handler="drop_series",quantile="0.9"} NaN
-http_response_size_bytes{handler="drop_series",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="drop_series"} 0
-http_response_size_bytes_count{handler="drop_series"} 0
-http_response_size_bytes{handler="federate",quantile="0.5"} NaN
-http_response_size_bytes{handler="federate",quantile="0.9"} NaN
-http_response_size_bytes{handler="federate",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="federate"} 0
-http_response_size_bytes_count{handler="federate"} 0
-http_response_size_bytes{handler="flags",quantile="0.5"} NaN
-http_response_size_bytes{handler="flags",quantile="0.9"} NaN
-http_response_size_bytes{handler="flags",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="flags"} 0
-http_response_size_bytes_count{handler="flags"} 0
-http_response_size_bytes{handler="graph",quantile="0.5"} 3619
-http_response_size_bytes{handler="graph",quantile="0.9"} 3619
-http_response_size_bytes{handler="graph",quantile="0.99"} 3619
-http_response_size_bytes_sum{handler="graph"} 10857
-http_response_size_bytes_count{handler="graph"} 3
-http_response_size_bytes{handler="heap",quantile="0.5"} NaN
-http_response_size_bytes{handler="heap",quantile="0.9"} NaN
-http_response_size_bytes{handler="heap",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="heap"} 0
-http_response_size_bytes_count{handler="heap"} 0
-http_response_size_bytes{handler="label_values",quantile="0.5"} 642
-http_response_size_bytes{handler="label_values",quantile="0.9"} 642
-http_response_size_bytes{handler="label_values",quantile="0.99"} 642
-http_response_size_bytes_sum{handler="label_values"} 1926
-http_response_size_bytes_count{handler="label_values"} 3
-http_response_size_bytes{handler="options",quantile="0.5"} NaN
-http_response_size_bytes{handler="options",quantile="0.9"} NaN
-http_response_size_bytes{handler="options",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="options"} 0
-http_response_size_bytes_count{handler="options"} 0
-http_response_size_bytes{handler="prometheus",quantile="0.5"} 3033
-http_response_size_bytes{handler="prometheus",quantile="0.9"} 3123
-http_response_size_bytes{handler="prometheus",quantile="0.99"} 3128
-http_response_size_bytes_sum{handler="prometheus"} 1.374097e+06
-http_response_size_bytes_count{handler="prometheus"} 462
-http_response_size_bytes{handler="query",quantile="0.5"} 776
-http_response_size_bytes{handler="query",quantile="0.9"} 781
-http_response_size_bytes{handler="query",quantile="0.99"} 781
-http_response_size_bytes_sum{handler="query"} 4656
-http_response_size_bytes_count{handler="query"} 6
-http_response_size_bytes{handler="query_range",quantile="0.5"} NaN
-http_response_size_bytes{handler="query_range",quantile="0.9"} NaN
-http_response_size_bytes{handler="query_range",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="query_range"} 0
-http_response_size_bytes_count{handler="query_range"} 0
-http_response_size_bytes{handler="rules",quantile="0.5"} NaN
-http_response_size_bytes{handler="rules",quantile="0.9"} NaN
-http_response_size_bytes{handler="rules",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="rules"} 0
-http_response_size_bytes_count{handler="rules"} 0
-http_response_size_bytes{handler="series",quantile="0.5"} NaN
-http_response_size_bytes{handler="series",quantile="0.9"} NaN
-http_response_size_bytes{handler="series",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="series"} 0
-http_response_size_bytes_count{handler="series"} 0
-http_response_size_bytes{handler="static",quantile="0.5"} 6316
-http_response_size_bytes{handler="static",quantile="0.9"} 6316
-http_response_size_bytes{handler="static",quantile="0.99"} 6316
-http_response_size_bytes_sum{handler="static"} 18948
-http_response_size_bytes_count{handler="static"} 3
-http_response_size_bytes{handler="status",quantile="0.5"} NaN
-http_response_size_bytes{handler="status",quantile="0.9"} NaN
-http_response_size_bytes{handler="status",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="status"} 0
-http_response_size_bytes_count{handler="status"} 0
-http_response_size_bytes{handler="targets",quantile="0.5"} NaN
-http_response_size_bytes{handler="targets",quantile="0.9"} NaN
-http_response_size_bytes{handler="targets",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="targets"} 0
-http_response_size_bytes_count{handler="targets"} 0
-http_response_size_bytes{handler="version",quantile="0.5"} NaN
-http_response_size_bytes{handler="version",quantile="0.9"} NaN
-http_response_size_bytes{handler="version",quantile="0.99"} NaN
-http_response_size_bytes_sum{handler="version"} 0
-http_response_size_bytes_count{handler="version"} 0
-# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, branch, and goversion from which prometheus was built.
-# TYPE prometheus_build_info gauge
-prometheus_build_info{branch="",goversion="go1.7.3",revision="",version=""} 1
-# HELP prometheus_config_last_reload_success_timestamp_seconds Timestamp of the last successful configuration reload.
-# TYPE prometheus_config_last_reload_success_timestamp_seconds gauge
-prometheus_config_last_reload_success_timestamp_seconds 1.484395547e+09
-# HELP prometheus_config_last_reload_successful Whether the last configuration reload attempt was successful.
-# TYPE prometheus_config_last_reload_successful gauge
-prometheus_config_last_reload_successful 1
-# HELP prometheus_evaluator_duration_seconds The duration of rule group evaluations.
-# TYPE prometheus_evaluator_duration_seconds summary
-prometheus_evaluator_duration_seconds{quantile="0.01"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds{quantile="0.05"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds{quantile="0.5"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds{quantile="0.9"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds{quantile="0.99"} 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds_sum 1.7890000000000002e-06
-prometheus_evaluator_duration_seconds_count 1
-# HELP prometheus_evaluator_iterations_skipped_total The total number of rule group evaluations skipped due to throttled metric storage.
-# TYPE prometheus_evaluator_iterations_skipped_total counter
-prometheus_evaluator_iterations_skipped_total 0
-# HELP prometheus_notifications_dropped_total Total number of alerts dropped due to alert manager missing in configuration.
-# TYPE prometheus_notifications_dropped_total counter
-prometheus_notifications_dropped_total 0
-# HELP prometheus_notifications_queue_capacity The capacity of the alert notifications queue.
-# TYPE prometheus_notifications_queue_capacity gauge
-prometheus_notifications_queue_capacity 10000
-# HELP prometheus_notifications_queue_length The number of alert notifications in the queue.
-# TYPE prometheus_notifications_queue_length gauge
-prometheus_notifications_queue_length 0
-# HELP prometheus_rule_evaluation_failures_total The total number of rule evaluation failures.
-# TYPE prometheus_rule_evaluation_failures_total counter
-prometheus_rule_evaluation_failures_total{rule_type="alerting"} 0
-prometheus_rule_evaluation_failures_total{rule_type="recording"} 0
-# HELP prometheus_sd_azure_refresh_duration_seconds The duration of a Azure-SD refresh in seconds.
-# TYPE prometheus_sd_azure_refresh_duration_seconds summary
-prometheus_sd_azure_refresh_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_azure_refresh_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_azure_refresh_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_azure_refresh_duration_seconds_sum 0
-prometheus_sd_azure_refresh_duration_seconds_count 0
-# HELP prometheus_sd_azure_refresh_failures_total Number of Azure-SD refresh failures.
-# TYPE prometheus_sd_azure_refresh_failures_total counter
-prometheus_sd_azure_refresh_failures_total 0
-# HELP prometheus_sd_consul_rpc_duration_seconds The duration of a Consul RPC call in seconds.
-# TYPE prometheus_sd_consul_rpc_duration_seconds summary
-prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.5"} NaN
-prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.9"} NaN
-prometheus_sd_consul_rpc_duration_seconds{call="service",endpoint="catalog",quantile="0.99"} NaN
-prometheus_sd_consul_rpc_duration_seconds_sum{call="service",endpoint="catalog"} 0
-prometheus_sd_consul_rpc_duration_seconds_count{call="service",endpoint="catalog"} 0
-prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.5"} NaN
-prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.9"} NaN
-prometheus_sd_consul_rpc_duration_seconds{call="services",endpoint="catalog",quantile="0.99"} NaN
-prometheus_sd_consul_rpc_duration_seconds_sum{call="services",endpoint="catalog"} 0
-prometheus_sd_consul_rpc_duration_seconds_count{call="services",endpoint="catalog"} 0
-# HELP prometheus_sd_consul_rpc_failures_total The number of Consul RPC call failures.
-# TYPE prometheus_sd_consul_rpc_failures_total counter
-prometheus_sd_consul_rpc_failures_total 0
-# HELP prometheus_sd_dns_lookup_failures_total The number of DNS-SD lookup failures.
-# TYPE prometheus_sd_dns_lookup_failures_total counter
-prometheus_sd_dns_lookup_failures_total 0
-# HELP prometheus_sd_dns_lookups_total The number of DNS-SD lookups.
-# TYPE prometheus_sd_dns_lookups_total counter
-prometheus_sd_dns_lookups_total 0
-# HELP prometheus_sd_ec2_refresh_duration_seconds The duration of a EC2-SD refresh in seconds.
-# TYPE prometheus_sd_ec2_refresh_duration_seconds summary
-prometheus_sd_ec2_refresh_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_ec2_refresh_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_ec2_refresh_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_ec2_refresh_duration_seconds_sum 0
-prometheus_sd_ec2_refresh_duration_seconds_count 0
-# HELP prometheus_sd_ec2_refresh_failures_total The number of EC2-SD scrape failures.
-# TYPE prometheus_sd_ec2_refresh_failures_total counter
-prometheus_sd_ec2_refresh_failures_total 0
-# HELP prometheus_sd_file_read_errors_total The number of File-SD read errors.
-# TYPE prometheus_sd_file_read_errors_total counter
-prometheus_sd_file_read_errors_total 0
-# HELP prometheus_sd_file_scan_duration_seconds The duration of the File-SD scan in seconds.
-# TYPE prometheus_sd_file_scan_duration_seconds summary
-prometheus_sd_file_scan_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_file_scan_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_file_scan_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_file_scan_duration_seconds_sum 0
-prometheus_sd_file_scan_duration_seconds_count 0
-# HELP prometheus_sd_gce_refresh_duration The duration of a GCE-SD refresh in seconds.
-# TYPE prometheus_sd_gce_refresh_duration summary
-prometheus_sd_gce_refresh_duration{quantile="0.5"} NaN
-prometheus_sd_gce_refresh_duration{quantile="0.9"} NaN
-prometheus_sd_gce_refresh_duration{quantile="0.99"} NaN
-prometheus_sd_gce_refresh_duration_sum 0
-prometheus_sd_gce_refresh_duration_count 0
-# HELP prometheus_sd_gce_refresh_failures_total The number of GCE-SD refresh failures.
-# TYPE prometheus_sd_gce_refresh_failures_total counter
-prometheus_sd_gce_refresh_failures_total 0
-# HELP prometheus_sd_kubernetes_events_total The number of Kubernetes events handled.
-# TYPE prometheus_sd_kubernetes_events_total counter
-prometheus_sd_kubernetes_events_total{event="add",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="add",role="service"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="delete",role="service"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="endpoints"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="node"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="pod"} 0
-prometheus_sd_kubernetes_events_total{event="update",role="service"} 0
-# HELP prometheus_sd_marathon_refresh_duration_seconds The duration of a Marathon-SD refresh in seconds.
-# TYPE prometheus_sd_marathon_refresh_duration_seconds summary
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.5"} NaN
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.9"} NaN
-prometheus_sd_marathon_refresh_duration_seconds{quantile="0.99"} NaN
-prometheus_sd_marathon_refresh_duration_seconds_sum 0
-prometheus_sd_marathon_refresh_duration_seconds_count 0
-# HELP prometheus_sd_marathon_refresh_failures_total The number of Marathon-SD refresh failures.
-# TYPE prometheus_sd_marathon_refresh_failures_total counter
-prometheus_sd_marathon_refresh_failures_total 0
-# HELP prometheus_target_interval_length_seconds Actual intervals between scrapes.
-# TYPE prometheus_target_interval_length_seconds summary
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.01"} 0.046182157
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.05"} 0.047306979000000006
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.5"} 0.050381782
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.9"} 0.052614556
-prometheus_target_interval_length_seconds{interval="50ms",quantile="0.99"} 0.054404386000000006
-prometheus_target_interval_length_seconds_sum{interval="50ms"} 34.512091221999995
-prometheus_target_interval_length_seconds_count{interval="50ms"} 685
-# HELP prometheus_target_scrape_pool_sync_total Total number of syncs that were executed on a scrape pool.
-# TYPE prometheus_target_scrape_pool_sync_total counter
-prometheus_target_scrape_pool_sync_total{scrape_job="prometheus"} 1
-# HELP prometheus_target_skipped_scrapes_total Total number of scrapes that were skipped because the metric storage was throttled.
-# TYPE prometheus_target_skipped_scrapes_total counter
-prometheus_target_skipped_scrapes_total 0
-# HELP prometheus_target_sync_length_seconds Actual interval to sync the scrape pool.
-# TYPE prometheus_target_sync_length_seconds summary
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.01"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.05"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.5"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.9"} 0.00020043300000000002
-prometheus_target_sync_length_seconds{scrape_job="prometheus",quantile="0.99"} 0.00020043300000000002
-prometheus_target_sync_length_seconds_sum{scrape_job="prometheus"} 0.00020043300000000002
-prometheus_target_sync_length_seconds_count{scrape_job="prometheus"} 1
-# HELP prometheus_treecache_watcher_goroutines The current number of watcher goroutines.
-# TYPE prometheus_treecache_watcher_goroutines gauge
-prometheus_treecache_watcher_goroutines 0
-# HELP prometheus_treecache_zookeeper_failures_total The total number of ZooKeeper failures.
-# TYPE prometheus_treecache_zookeeper_failures_total counter
-prometheus_treecache_zookeeper_failures_total 0
-# EOF
diff -pruN 2.31.2+ds1-1/pkg/textparse/README.md 2.33.5+ds1-2/pkg/textparse/README.md
--- 2.31.2+ds1-1/pkg/textparse/README.md	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/textparse/README.md	1970-01-01 00:00:00.000000000 +0000
@@ -1,6 +0,0 @@
-# Making changes to textparse lexers
-In the rare case that you need to update the textparse lexers, edit promlex.l or openmetricslex.l and then run the following command: 
-`golex -o=promlex.l.go promlex.l`
-
-Note that you need golex installed: 
-`go get -u modernc.org/golex`
\ No newline at end of file
diff -pruN 2.31.2+ds1-1/pkg/timestamp/timestamp.go 2.33.5+ds1-2/pkg/timestamp/timestamp.go
--- 2.31.2+ds1-1/pkg/timestamp/timestamp.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/timestamp/timestamp.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,34 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package timestamp
-
-import (
-	"math"
-	"time"
-)
-
-// FromTime returns a new millisecond timestamp from a time.
-func FromTime(t time.Time) int64 {
-	return t.Unix()*1000 + int64(t.Nanosecond())/int64(time.Millisecond)
-}
-
-// Time returns a new time.Time object from a millisecond timestamp.
-func Time(ts int64) time.Time {
-	return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC()
-}
-
-// FromFloatSeconds returns a millisecond timestamp from float seconds.
-func FromFloatSeconds(ts float64) int64 {
-	return int64(math.Round(ts * 1000))
-}
diff -pruN 2.31.2+ds1-1/pkg/value/value.go 2.33.5+ds1-2/pkg/value/value.go
--- 2.31.2+ds1-1/pkg/value/value.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/pkg/value/value.go	1970-01-01 00:00:00.000000000 +0000
@@ -1,34 +0,0 @@
-// Copyright 2016 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package value
-
-import (
-	"math"
-)
-
-const (
-	// NormalNaN is a quiet NaN. This is also math.NaN().
-	NormalNaN uint64 = 0x7ff8000000000001
-
-	// StaleNaN is a signaling NaN, due to the MSB of the mantissa being 0.
-	// This value is chosen with many leading 0s, so we have scope to store more
-	// complicated values in the future. It is 2 rather than 1 to make
-	// it easier to distinguish from the NormalNaN by a human when debugging.
-	StaleNaN uint64 = 0x7ff0000000000002
-)
-
-// IsStaleNaN returns true when the provided NaN value is a stale marker.
-func IsStaleNaN(v float64) bool {
-	return math.Float64bits(v) == StaleNaN
-}
diff -pruN 2.31.2+ds1-1/promql/bench_test.go 2.33.5+ds1-2/promql/bench_test.go
--- 2.31.2+ds1-1/promql/bench_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/promql/bench_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -21,14 +21,15 @@ import (
 	"testing"
 	"time"
 
-	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/model/labels"
 	"github.com/prometheus/prometheus/promql/parser"
+	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/util/teststorage"
 )
 
 func BenchmarkRangeQuery(b *testing.B) {
-	storage := teststorage.New(b)
-	defer storage.Close()
+	stor := teststorage.New(b)
+	defer stor.Close()
 	opts := EngineOpts{
 		Logger:     nil,
 		Reg:        nil,
@@ -62,13 +63,13 @@ func BenchmarkRangeQuery(b *testing.B) {
 		}
 		metrics = append(metrics, labels.FromStrings("__name__", "h_hundred", "l", strconv.Itoa(i), "le", "+Inf"))
 	}
-	refs := make([]uint64, len(metrics))
+	refs := make([]storage.SeriesRef, len(metrics))
 
 	// A day of data plus 10k steps.
 	numIntervals := 8640 + 10000
 
 	for s := 0; s < numIntervals; s++ {
-		a := storage.Appender(context.Background())
+		a := stor.Appender(context.Background())
 		ts := int64(s * 10000) // 10s interval.
 		for i, metric := range metrics {
 			ref, _ := a.Append(refs[i], metric, ts, float64(s)+float64(i)/float64(len(metrics)))
@@ -216,7 +217,7 @@ func BenchmarkRangeQuery(b *testing.B) {
 			b.ReportAllocs()
 			for i := 0; i < b.N; i++ {
 				qry, err := engine.NewRangeQuery(
-					storage, c.expr,
+					stor, c.expr,
 					time.Unix(int64((numIntervals-c.steps)*10), 0),
 					time.Unix(int64(numIntervals*10), 0), time.Second*10)
 				if err != nil {
diff -pruN 2.31.2+ds1-1/promql/engine.go 2.33.5+ds1-2/promql/engine.go
--- 2.31.2+ds1-1/promql/engine.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/promql/engine.go	2022-03-08 16:34:32.000000000 +0000
@@ -35,9 +35,9 @@ import (
 	"github.com/prometheus/common/model"
 	"github.com/uber/jaeger-client-go"
 
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/timestamp"
-	"github.com/prometheus/prometheus/pkg/value"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/timestamp"
+	"github.com/prometheus/prometheus/model/value"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
 	"github.com/prometheus/prometheus/util/stats"
@@ -87,12 +87,15 @@ type (
 func (e ErrQueryTimeout) Error() string {
 	return fmt.Sprintf("query timed out in %s", string(e))
 }
+
 func (e ErrQueryCanceled) Error() string {
 	return fmt.Sprintf("query was canceled in %s", string(e))
 }
+
 func (e ErrTooManySamples) Error() string {
 	return fmt.Sprintf("query processing would load too many samples into memory in %s", string(e))
 }
+
 func (e ErrStorage) Error() string {
 	return e.Err.Error()
 }
@@ -219,10 +222,17 @@ type EngineOpts struct {
 	// a subquery in milliseconds if no step in range vector was specified `[30m:<step>]`.
 	NoStepSubqueryIntervalFn func(rangeMillis int64) int64
 
-	// EnableAtModifier if true enables @ modifier. Disabled otherwise.
+	// EnableAtModifier if true enables @ modifier. Disabled otherwise. This
+	// is supposed to be enabled for regular PromQL (as of Prometheus v2.33)
+	// but the option to disable it is still provided here for those using
+	// the Engine outside of Prometheus.
 	EnableAtModifier bool
 
-	// EnableNegativeOffset if true enables negative (-) offset values. Disabled otherwise.
+	// EnableNegativeOffset if true enables negative (-) offset
+	// values. Disabled otherwise. This is supposed to be enabled for
+	// regular PromQL (as of Prometheus v2.33) but the option to disable it
+	// is still provided here for those using the Engine outside of
+	// Prometheus.
 	EnableNegativeOffset bool
 }
 
@@ -402,8 +412,10 @@ func (ng *Engine) newQuery(q storage.Que
 	return qry, nil
 }
 
-var ErrValidationAtModifierDisabled = errors.New("@ modifier is disabled")
-var ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled")
+var (
+	ErrValidationAtModifierDisabled     = errors.New("@ modifier is disabled")
+	ErrValidationNegativeOffsetDisabled = errors.New("negative offset is disabled")
+)
 
 func (ng *Engine) validateOpts(expr parser.Expr) error {
 	if ng.enableAtModifier && ng.enableNegativeOffset {
@@ -1175,7 +1187,9 @@ func (ev *evaluator) eval(expr parser.Ex
 		}
 
 		unwrapParenExpr(&e.Param)
-		if s, ok := unwrapStepInvariantExpr(e.Param).(*parser.StringLiteral); ok {
+		param := unwrapStepInvariantExpr(e.Param)
+		unwrapParenExpr(&param)
+		if s, ok := param.(*parser.StringLiteral); ok {
 			return ev.rangeEval(initSeries, func(v []parser.Value, sh [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
 				return ev.aggregation(e.Op, sortedGrouping, e.Without, s.Val, v[0].(Vector), sh[0], enh), nil
 			}, e.Expr)
@@ -1197,6 +1211,7 @@ func (ev *evaluator) eval(expr parser.Ex
 			// a vector selector.
 			unwrapParenExpr(&e.Args[0])
 			arg := unwrapStepInvariantExpr(e.Args[0])
+			unwrapParenExpr(&arg)
 			vs, ok := arg.(*parser.VectorSelector)
 			if ok {
 				return ev.rangeEval(nil, func(v []parser.Value, _ [][]EvalSeriesHelper, enh *EvalNodeHelper) (Vector, storage.Warnings) {
@@ -1220,6 +1235,7 @@ func (ev *evaluator) eval(expr parser.Ex
 		for i := range e.Args {
 			unwrapParenExpr(&e.Args[i])
 			a := unwrapStepInvariantExpr(e.Args[i])
+			unwrapParenExpr(&a)
 			if _, ok := a.(*parser.MatrixSelector); ok {
 				matrixArgIndex = i
 				matrixArg = true
@@ -1262,7 +1278,10 @@ func (ev *evaluator) eval(expr parser.Ex
 			}
 		}
 
-		sel := unwrapStepInvariantExpr(e.Args[matrixArgIndex]).(*parser.MatrixSelector)
+		unwrapParenExpr(&e.Args[matrixArgIndex])
+		arg := unwrapStepInvariantExpr(e.Args[matrixArgIndex])
+		unwrapParenExpr(&arg)
+		sel := arg.(*parser.MatrixSelector)
 		selVS := sel.VectorSelector.(*parser.VectorSelector)
 
 		ws, err := checkAndExpandSeriesSet(ev.ctx, sel)
@@ -1631,7 +1650,7 @@ func (ev *evaluator) vectorSelectorSingl
 	}
 
 	if ok {
-		t, v = it.Values()
+		t, v = it.At()
 	}
 
 	if !ok || t > refTime {
@@ -1751,7 +1770,7 @@ func (ev *evaluator) matrixIterSlice(it
 	}
 	// The seeked sample might also be in the range.
 	if ok {
-		t, v := it.Values()
+		t, v := it.At()
 		if t == maxt && !value.IsStaleNaN(v) {
 			if ev.currentSamples >= ev.maxSamples {
 				ev.error(ErrTooManySamples(env))
@@ -2139,7 +2158,6 @@ type groupedAggregation struct {
 // aggregation evaluates an aggregation operation on a Vector. The provided grouping labels
 // must be sorted.
 func (ev *evaluator) aggregation(op parser.ItemType, grouping []string, without bool, param interface{}, vec Vector, seriesHelper []EvalSeriesHelper, enh *EvalNodeHelper) Vector {
-
 	result := map[uint64]*groupedAggregation{}
 	orderedResult := []*groupedAggregation{}
 	var k int64
@@ -2509,7 +2527,6 @@ func preprocessExprHelper(expr parser.Ex
 		}
 
 		if isStepInvariant {
-
 			// The function and all arguments are step invariant.
 			return true
 		}
@@ -2555,12 +2572,6 @@ func preprocessExprHelper(expr parser.Ex
 }
 
 func newStepInvariantExpr(expr parser.Expr) parser.Expr {
-	if e, ok := expr.(*parser.ParenExpr); ok {
-		// Wrapping the inside of () makes it easy to unwrap the paren later.
-		// But this effectively unwraps the paren.
-		return newStepInvariantExpr(e.Expr)
-
-	}
 	return &parser.StepInvariantExpr{Expr: expr}
 }
 
diff -pruN 2.31.2+ds1-1/promql/engine_test.go 2.33.5+ds1-2/promql/engine_test.go
--- 2.31.2+ds1-1/promql/engine_test.go	2022-01-21 00:33:16.000000000 +0000
+++ 2.33.5+ds1-2/promql/engine_test.go	2022-03-08 16:34:32.000000000 +0000
@@ -26,8 +26,8 @@ import (
 	"github.com/stretchr/testify/require"
 	"go.uber.org/goleak"
 
-	"github.com/prometheus/prometheus/pkg/labels"
-	"github.com/prometheus/prometheus/pkg/timestamp"
+	"github.com/prometheus/prometheus/model/labels"
+	"github.com/prometheus/prometheus/model/timestamp"
 	"github.com/prometheus/prometheus/promql/parser"
 	"github.com/prometheus/prometheus/storage"
 )
@@ -58,10 +58,19 @@ func TestQueryConcurrency(t *testing.T)
 
 	block := make(chan struct{})
 	processing := make(chan struct{})
+	done := make(chan int)
+	defer close(done)
 
 	f := func(context.Context) error {
-		processing <- struct{}{}
-		<-block
+		select {
+		case processing <- struct{}{}:
+		case <-done:
+		}
+
+		select {
+		case <-block:
+		case <-done:
+		}
 		return nil
 	}
 
@@ -181,9 +190,11 @@ type errQuerier struct {
 func (q *errQuerier) Select(bool, *storage.SelectHints, ...*labels.Matcher) storage.SeriesSet {
 	return errSeriesSet{err: q.err}
 }
+
 func (*errQuerier) LabelValues(string, ...*labels.Matcher) ([]string, storage.Warnings, error) {
 	return nil, nil, nil
 }
+
 func (*errQuerier) LabelNames(...*labels.Matcher) ([]string, storage.Warnings, error) {
 	return nil, nil, nil
 }
@@ -267,277 +278,277 @@ func TestSelectHintsSetCorrectly(t *test
 
 		// TODO(bwplotka): Add support for better hints when subquerying.
 		expected []*storage.SelectHints
-	}{{
-		query: "foo", start: 10000,
-		expected: []*storage.SelectHints{
-			{Start: 5000, End: 10000},
-		},
-	}, {
-		query: "foo @ 15", start: 10000,
-		expected: []*storage.SelectHints{
-			{Start: 10000, End: 15000},
-		},
-	}, {
-		query: "foo @ 1", start: 10000,
-		expected: []*storage.SelectHints{
-			{Start: -4000, End: 1000},
-		},
-	}, {
-		query: "foo[2m]", start: 200000,
-		expected: []*storage.SelectHints{
-			{Start: 80000, End: 200000, Range: 120000},
-		},
-	}, {
-		query: "foo[2m] @ 180", start: 200000,
-		expected: []*storage.SelectHints{
-			{Start: 60000, End: 180000, Range: 120000},
-		},
-	}, {
-		query: "foo[2m] @ 300", start: 200000,
-		expected: []*storage.SelectHints{
-			{Start: 180000, End: 300000, Range: 120000},
-		},
-	}, {
-		query: "foo[2m] @ 60", start: 200000,
-		expected: []*storage.SelectHints{
-			{Start: -60000, End: 60000, Range: 120000},
-		},
-	}, {
-		query: "foo[2m] offset 2m", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: 60000, End: 180000, Range: 120000},
-		},
-	}, {
-		query: "foo[2m] @ 200 offset 2m", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: -40000, End: 80000, Range: 120000},
-		},
-	}, {
-		query: "foo[2m:1s]", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: 175000, End: 300000},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s])", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: 175000, End: 300000, Func: "count_over_time"},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s] @ 300)", start: 200000,
-		expected: []*storage.SelectHints{
-			{Start: 175000, End: 300000, Func: "count_over_time"},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s] @ 200)", start: 200000,
-		expected: []*storage.SelectHints{
-			{Start: 75000, End: 200000, Func: "count_over_time"},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s] @ 100)", start: 200000,
-		expected: []*storage.SelectHints{
-			{Start: -25000, End: 100000, Func: "count_over_time"},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: 165000, End: 290000, Func: "count_over_time"},
-		},
-	}, {
-		query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: 155000, End: 280000, Func: "count_over_time"},
-		},
-	}, {
-		// When the @ is on the vector selector, the enclosing subquery parameters
-		// don't affect the hint ranges.
-		query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: 185000, End: 190000, Func: "count_over_time"},
-		},
-	}, {
-		// When the @ is on the vector selector, the enclosing subquery parameters
-		// don't affect the hint ranges.
-		query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: 185000, End: 190000, Func: "count_over_time"},
-		},
-	}, {
-		query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
-		expected: []*storage.SelectHints{
-			{Start: -45000, End: 80000, Func: "count_over_time"},
-		},
-	}, {
-
-		query: "foo", start: 10000, end: 20000,
-		expected: []*storage.SelectHints{
-			{Start: 5000, End: 20000, Step: 1000},
-		},
-	}, {
-		query: "foo @ 15", start: 10000, end: 20000,
-		expected: []*storage.SelectHints{
-			{Start: 10000, End: 15000, Step: 1000},
-		},
-	}, {
-		query: "foo @ 1", start: 10000, end: 20000,
-		expected: []*storage.SelectHints{
-			{Start: -4000, End: 1000, Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2m] @ 180)", start: 200000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2m] @ 300)", start: 200000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2m] @ 60)", start: 200000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2m])", start: 200000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2m:1s])", start: 300000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 175000, End: 500000, Func: "rate", Step: 1000},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 175000, End: 500000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 165000, End: 490000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 155000, End: 480000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		// When the @ is on the vector selector, the enclosing subquery parameters
-		// don't affect the hint ranges.
-		query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		// When the @ is on the vector selector, the enclosing subquery parameters
-		// don't affect the hint ranges.
-		query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
-		},
-	}, {
-		query: "sum by (dim1) (foo)", start: 10000,
-		expected: []*storage.SelectHints{
-			{Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}},
-		},
-	}, {
-		query: "sum without (dim1) (foo)", start: 10000,
-		expected: []*storage.SelectHints{
-			{Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}},
-		},
-	}, {
-		query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000,
-		expected: []*storage.SelectHints{
-			{Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000},
-		},
-	}, {
-		query: "sum by (dim1) (max by (dim2) (foo))", start: 10000,
-		expected: []*storage.SelectHints{
-			{Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}},
-		},
-	}, {
-		query: "(max by (dim1) (foo))[5s:1s]", start: 10000,
-		expected: []*storage.SelectHints{
-			{Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}},
-		},
-	}, {
-		query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000,
-		expected: []*storage.SelectHints{
-			{Start: 95000, End: 120000, Func: "sum", By: true},
-			{Start: 95000, End: 120000, Func: "max", By: true},
-		},
-	}, {
-		query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 45000, End: 50000, Step: 1000},
-			{Start: 245000, End: 250000, Step: 1000},
-			{Start: 895000, End: 900000, Step: 1000},
-		},
-	}, {
-		query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 45000, End: 50000, Step: 1000},
-			{Start: 95000, End: 500000, Step: 1000},
-			{Start: 895000, End: 900000, Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000},
-			{Start: 245000, End: 250000, Step: 1000},
-			{Start: 895000, End: 900000, Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
-			{Start: 95000, End: 500000, Step: 1000},
-			{Start: 95000, End: 500000, Step: 1000},
-		},
-	}, {
-		query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000,
-		expected: []*storage.SelectHints{
-			{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
-			{Start: 95000, End: 500000, Step: 1000},
-			{Start: 655000, End: 780000, Step: 1000, Func: "rate"},
-		},
-	}, { // Hints are based on the inner most subquery timestamp.
-		query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000,
-		expected: []*storage.SelectHints{
-			{Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"},
-		},
-	}, { // Hints are based on the inner most subquery timestamp.
-		query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`,
-		expected: []*storage.SelectHints{
-			{Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"},
+	}{
+		{
+			query: "foo", start: 10000,
+			expected: []*storage.SelectHints{
+				{Start: 5000, End: 10000},
+			},
+		}, {
+			query: "foo @ 15", start: 10000,
+			expected: []*storage.SelectHints{
+				{Start: 10000, End: 15000},
+			},
+		}, {
+			query: "foo @ 1", start: 10000,
+			expected: []*storage.SelectHints{
+				{Start: -4000, End: 1000},
+			},
+		}, {
+			query: "foo[2m]", start: 200000,
+			expected: []*storage.SelectHints{
+				{Start: 80000, End: 200000, Range: 120000},
+			},
+		}, {
+			query: "foo[2m] @ 180", start: 200000,
+			expected: []*storage.SelectHints{
+				{Start: 60000, End: 180000, Range: 120000},
+			},
+		}, {
+			query: "foo[2m] @ 300", start: 200000,
+			expected: []*storage.SelectHints{
+				{Start: 180000, End: 300000, Range: 120000},
+			},
+		}, {
+			query: "foo[2m] @ 60", start: 200000,
+			expected: []*storage.SelectHints{
+				{Start: -60000, End: 60000, Range: 120000},
+			},
+		}, {
+			query: "foo[2m] offset 2m", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: 60000, End: 180000, Range: 120000},
+			},
+		}, {
+			query: "foo[2m] @ 200 offset 2m", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: -40000, End: 80000, Range: 120000},
+			},
+		}, {
+			query: "foo[2m:1s]", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: 175000, End: 300000},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s])", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: 175000, End: 300000, Func: "count_over_time"},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s] @ 300)", start: 200000,
+			expected: []*storage.SelectHints{
+				{Start: 175000, End: 300000, Func: "count_over_time"},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s] @ 200)", start: 200000,
+			expected: []*storage.SelectHints{
+				{Start: 75000, End: 200000, Func: "count_over_time"},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s] @ 100)", start: 200000,
+			expected: []*storage.SelectHints{
+				{Start: -25000, End: 100000, Func: "count_over_time"},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: 165000, End: 290000, Func: "count_over_time"},
+			},
+		}, {
+			query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: 155000, End: 280000, Func: "count_over_time"},
+			},
+		}, {
+			// When the @ is on the vector selector, the enclosing subquery parameters
+			// don't affect the hint ranges.
+			query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: 185000, End: 190000, Func: "count_over_time"},
+			},
+		}, {
+			// When the @ is on the vector selector, the enclosing subquery parameters
+			// don't affect the hint ranges.
+			query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: 185000, End: 190000, Func: "count_over_time"},
+			},
+		}, {
+			query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000,
+			expected: []*storage.SelectHints{
+				{Start: -45000, End: 80000, Func: "count_over_time"},
+			},
+		}, {
+			query: "foo", start: 10000, end: 20000,
+			expected: []*storage.SelectHints{
+				{Start: 5000, End: 20000, Step: 1000},
+			},
+		}, {
+			query: "foo @ 15", start: 10000, end: 20000,
+			expected: []*storage.SelectHints{
+				{Start: 10000, End: 15000, Step: 1000},
+			},
+		}, {
+			query: "foo @ 1", start: 10000, end: 20000,
+			expected: []*storage.SelectHints{
+				{Start: -4000, End: 1000, Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2m] @ 180)", start: 200000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 60000, End: 180000, Range: 120000, Func: "rate", Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2m] @ 300)", start: 200000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 180000, End: 300000, Range: 120000, Func: "rate", Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2m] @ 60)", start: 200000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: -60000, End: 60000, Range: 120000, Func: "rate", Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2m])", start: 200000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 80000, End: 500000, Range: 120000, Func: "rate", Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2m] offset 2m)", start: 300000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 60000, End: 380000, Range: 120000, Func: "rate", Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2m:1s])", start: 300000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 175000, End: 500000, Func: "rate", Step: 1000},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s])", start: 300000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 175000, End: 500000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s] offset 10s)", start: 300000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 165000, End: 490000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s] @ 300)", start: 200000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 175000, End: 300000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s] @ 200)", start: 200000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 75000, End: 200000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			query: "count_over_time(foo[2m:1s] @ 100)", start: 200000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: -25000, End: 100000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			query: "count_over_time((foo offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 155000, End: 480000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			// When the @ is on the vector selector, the enclosing subquery parameters
+			// don't affect the hint ranges.
+			query: "count_over_time((foo @ 200 offset 10s)[2m:1s] offset 10s)", start: 300000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			// When the @ is on the vector selector, the enclosing subquery parameters
+			// don't affect the hint ranges.
+			query: "count_over_time((foo @ 200 offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 185000, End: 190000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			query: "count_over_time((foo offset 10s)[2m:1s] @ 100 offset 10s)", start: 300000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: -45000, End: 80000, Func: "count_over_time", Step: 1000},
+			},
+		}, {
+			query: "sum by (dim1) (foo)", start: 10000,
+			expected: []*storage.SelectHints{
+				{Start: 5000, End: 10000, Func: "sum", By: true, Grouping: []string{"dim1"}},
+			},
+		}, {
+			query: "sum without (dim1) (foo)", start: 10000,
+			expected: []*storage.SelectHints{
+				{Start: 5000, End: 10000, Func: "sum", Grouping: []string{"dim1"}},
+			},
+		}, {
+			query: "sum by (dim1) (avg_over_time(foo[1s]))", start: 10000,
+			expected: []*storage.SelectHints{
+				{Start: 9000, End: 10000, Func: "avg_over_time", Range: 1000},
+			},
+		}, {
+			query: "sum by (dim1) (max by (dim2) (foo))", start: 10000,
+			expected: []*storage.SelectHints{
+				{Start: 5000, End: 10000, Func: "max", By: true, Grouping: []string{"dim2"}},
+			},
+		}, {
+			query: "(max by (dim1) (foo))[5s:1s]", start: 10000,
+			expected: []*storage.SelectHints{
+				{Start: 0, End: 10000, Func: "max", By: true, Grouping: []string{"dim1"}},
+			},
+		}, {
+			query: "(sum(http_requests{group=~\"p.*\"})+max(http_requests{group=~\"c.*\"}))[20s:5s]", start: 120000,
+			expected: []*storage.SelectHints{
+				{Start: 95000, End: 120000, Func: "sum", By: true},
+				{Start: 95000, End: 120000, Func: "max", By: true},
+			},
+		}, {
+			query: "foo @ 50 + bar @ 250 + baz @ 900", start: 100000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 45000, End: 50000, Step: 1000},
+				{Start: 245000, End: 250000, Step: 1000},
+				{Start: 895000, End: 900000, Step: 1000},
+			},
+		}, {
+			query: "foo @ 50 + bar + baz @ 900", start: 100000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 45000, End: 50000, Step: 1000},
+				{Start: 95000, End: 500000, Step: 1000},
+				{Start: 895000, End: 900000, Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2s] @ 50) + bar @ 250 + baz @ 900", start: 100000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 48000, End: 50000, Step: 1000, Func: "rate", Range: 2000},
+				{Start: 245000, End: 250000, Step: 1000},
+				{Start: 895000, End: 900000, Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2s:1s] @ 50) + bar + baz", start: 100000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
+				{Start: 95000, End: 500000, Step: 1000},
+				{Start: 95000, End: 500000, Step: 1000},
+			},
+		}, {
+			query: "rate(foo[2s:1s] @ 50) + bar + rate(baz[2m:1s] @ 900 offset 2m) ", start: 100000, end: 500000,
+			expected: []*storage.SelectHints{
+				{Start: 43000, End: 50000, Step: 1000, Func: "rate"},
+				{Start: 95000, End: 500000, Step: 1000},
+				{Start: 655000, End: 780000, Step: 1000, Func: "rate"},
+			},
+		}, { // Hints are based on the inner most subquery timestamp.
+			query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 50)[3s:1s] @ 3000`, start: 100000,
+			expected: []*storage.SelectHints{
+				{Start: -150000, End: 50000, Range: 100000, Func: "sum_over_time"},
+			},
+		}, { // Hints are based on the inner most subquery timestamp.
+			query: `sum_over_time(sum_over_time(metric{job="1"}[100s])[100s:25s] @ 3000)[3s:1s] @ 50`,
+			expected: []*storage.SelectHints{
+				{Start: 2800000, End: 3000000, Range: 100000, Func: "sum_over_time"},
+			},
 		},
-	},
 	} {
 		t.Run(tc.query, func(t *testing.T) {
 			engine := NewEngine(opts)
@@ -559,7 +570,6 @@ func TestSelectHintsSetCorrectly(t *test
 
 			require.Equal(t, tc.expected, hintsRecorder.hints)
 		})
-
 	}
 }
 
@@ -645,25 +655,31 @@ load 10s
 		{
 			Query: "metric",
 			Result: Vector{
-				Sample{Point: Point{V: 1, T: 1000},
-					Metric: labels.FromStrings("__name__", "metric")},
+				Sample{
+					Point:  Point{V: 1, T: 1000},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
 			},
 			Start: time.Unix(1, 0),
 		},
 		{
 			Query: "metric[20s]",
-			Result: Matrix{Series{
-				Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}},
-				Metric: labels.FromStrings("__name__", "metric")},
+			Result: Matrix{
+				Series{
+					Points: []Point{{V: 1, T: 0}, {V: 2, T: 10000}},
+					Metric: labels.FromStrings("__name__", "metric"),
+				},
 			},
 			Start: time.Unix(10, 0),
 		},
 		// Range queries.
 		{
 			Query: "1",
-			Result: Matrix{Series{
-				Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}},
-				Metric: labels.FromStrings()},
+			Result: Matrix{
+				Series{
+					Points: []Point{{V: 1, T: 0}, {V: 1, T: 1000}, {V: 1, T: 2000}},
+					Metric: labels.FromStrings(),
+				},
 			},
 			Start:    time.Unix(0, 0),
 			End:      time.Unix(2, 0),
@@ -671,9 +687,11 @@ load 10s
 		},
 		{
 			Query: "metric",
-			Result: Matrix{Series{
-				Point