From 616e3df4b9d9805e44364a7b7cd7598ca970a3cb Mon Sep 17 00:00:00 2001
From: Geoffrey Gilmore
Date: Wed, 17 Jan 2024 15:19:54 -0800
Subject: [PATCH] monitoring: fix alert definition for site configuration by adding scrape job label (#59687)

We recently discovered that the definition for the alert that fires when the site configuration hasn't been fetched within 5 minutes strips out the regex that targets individual services (since that regex uses a Grafana variable, which can't be resolved in an alert query). As a result, every instance of this alert fires whenever any single service trips the threshold.

This PR fixes the issue by adding a `job` filter to the alert that targets only the services with the matching Prometheus scrape target name. Using a fixed value for the `job` label instead of a dynamic Grafana variable sidesteps the problem above.

The job filter generally looks like `job=~.*$container_name` (following the strategy from https://sourcegraph.com/github.com/sourcegraph/sourcegraph@9a780f2e694238b5326e3e121d6a1828463001b9/-/blob/monitoring/monitoring/monitoring.go?L161), except where the existing dashboards already used different logic for a service. For example:

- `frontend`: already used `job=~"(sourcegraph-)?frontend"` for some metrics, so I reused it here
- `worker`: already used `job=~"^worker.*"` for some metrics, so I reused it here and standardized the other existing panels on the same shared regex

## Test plan

I eyeballed the generated alerts.md and dashboards.md to verify that my changes looked correct (that is, each refactor either produced no diff, or the diff it produced still looked like valid regex).
---
 doc/admin/observability/alerts.md             | 26 +++++++-------
 doc/admin/observability/dashboards.md         | 36 +++++++++----------
 monitoring/definitions/embeddings.go          |  5 +++
 monitoring/definitions/frontend.go            |  7 ++--
 monitoring/definitions/git_server.go          |  3 ++
 monitoring/definitions/repo_updater.go        |  3 ++
 monitoring/definitions/searcher.go            |  3 ++
 .../definitions/shared/site_configuration.go  | 13 +++++--
 monitoring/definitions/symbols.go             |  3 ++
 monitoring/definitions/worker.go              |  9 +++--
 10 files changed, 70 insertions(+), 38 deletions(-)

diff --git a/doc/admin/observability/alerts.md b/doc/admin/observability/alerts.md
index 39abd452e03..4f84a76aa37 100644
--- a/doc/admin/observability/alerts.md
+++ b/doc/admin/observability/alerts.md
@@ -639,7 +639,7 @@ Generated query for warning alert: `max((sum by (alert_type) (increase(src_graph
 Technical details
-Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds[1m]))) >= 300)`
+Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~"(sourcegraph-)?frontend"}[1m]))) >= 300)`
@@ -1567,7 +1567,7 @@ Generated query for warning alert: `max((sum(src_gitserver_lsremote_queue)) >= 2
 Technical details
-Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds[1m]))) >= 300)`
+Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~".*gitserver"}[1m]))) >= 300)`
@@ -3144,9 +3144,9 @@ Generated query for critical alert: `min((sum by (app) (up{app=~".*redis-store"}
 Technical details
-Generated query for warning alert: `(min((sum(src_worker_jobs{job="worker",job_name="codeintel-upload-janitor"})) < 1)) or (absent(sum(src_worker_jobs{job="worker",job_name="codeintel-upload-janitor"})) == 1)`
+Generated query for warning alert: `(min((sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-upload-janitor"})) < 1)) or (absent(sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-upload-janitor"})) == 1)`
-Generated query for critical alert: `(min((sum(src_worker_jobs{job="worker",job_name="codeintel-upload-janitor"})) < 1)) or (absent(sum(src_worker_jobs{job="worker",job_name="codeintel-upload-janitor"})) == 1)`
+Generated query for critical alert: `(min((sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-upload-janitor"})) < 1)) or (absent(sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-upload-janitor"})) == 1)`
@@ -3182,9 +3182,9 @@ Generated query for critical alert: `(min((sum(src_worker_jobs{job="worker",job_
 Technical details
-Generated query for warning alert: `(min((sum(src_worker_jobs{job="worker",job_name="codeintel-commitgraph-updater"})) < 1)) or (absent(sum(src_worker_jobs{job="worker",job_name="codeintel-commitgraph-updater"})) == 1)`
+Generated query for warning alert: `(min((sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-commitgraph-updater"})) < 1)) or (absent(sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-commitgraph-updater"})) == 1)`
-Generated query for critical alert: `(min((sum(src_worker_jobs{job="worker",job_name="codeintel-commitgraph-updater"})) < 1)) or (absent(sum(src_worker_jobs{job="worker",job_name="codeintel-commitgraph-updater"})) == 1)`
+Generated query for critical alert: `(min((sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-commitgraph-updater"})) < 1)) or (absent(sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-commitgraph-updater"})) == 1)`
@@ -3220,9 +3220,9 @@ Generated query for critical alert: `(min((sum(src_worker_jobs{job="worker",job_
 Technical details
-Generated query for warning alert: `(min((sum(src_worker_jobs{job="worker",job_name="codeintel-autoindexing-scheduler"})) < 1)) or (absent(sum(src_worker_jobs{job="worker",job_name="codeintel-autoindexing-scheduler"})) == 1)`
+Generated query for warning alert: `(min((sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-autoindexing-scheduler"})) < 1)) or (absent(sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-autoindexing-scheduler"})) == 1)`
-Generated query for critical alert: `(min((sum(src_worker_jobs{job="worker",job_name="codeintel-autoindexing-scheduler"})) < 1)) or (absent(sum(src_worker_jobs{job="worker",job_name="codeintel-autoindexing-scheduler"})) == 1)`
+Generated query for critical alert: `(min((sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-autoindexing-scheduler"})) < 1)) or (absent(sum(src_worker_jobs{job=~"^worker.*",job_name="codeintel-autoindexing-scheduler"})) == 1)`
@@ -3765,7 +3765,7 @@ Generated query for critical alert: `min((sum by (app) (up{app=~".*worker"}) / c
 Technical details
-Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds[1m]))) >= 300)`
+Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~"^worker.*"}[1m]))) >= 300)`
@@ -4497,7 +4497,7 @@ Generated query for critical alert: `min((max by (name) (src_gitlab_rate_limit_r
 Technical details
-Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds[1m]))) >= 300)`
+Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~".*repo-updater"}[1m]))) >= 300)`
@@ -4942,7 +4942,7 @@ Generated query for warning alert: `max((sum by (code) (increase(searcher_servic
 Technical details
-Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds[1m]))) >= 300)`
+Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~".*searcher"}[1m]))) >= 300)`
@@ -5327,7 +5327,7 @@ Generated query for critical alert: `min((sum by (app) (up{app=~".*searcher"}) /
 Technical details
-Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds[1m]))) >= 300)`
+Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~".*symbols"}[1m]))) >= 300)`
@@ -7710,7 +7710,7 @@ Generated query for critical alert: `min((sum by (app) (up{app=~".*otel-collecto
 Technical details
-Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds[1m]))) >= 300)`
+Generated query for critical alert: `max((max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~".*embeddings"}[1m]))) >= 300)`
diff --git a/doc/admin/observability/dashboards.md b/doc/admin/observability/dashboards.md
index 18bd3d7b655..47e2f95b15f 100644
--- a/doc/admin/observability/dashboards.md
+++ b/doc/admin/observability/dashboards.md
@@ -398,7 +398,7 @@ To see this panel, visit `/-/debug/grafana/d/frontend/frontend?viewPanel=100300`
 Technical details
-Query: `src_conf_client_time_since_last_successful_update_seconds{instance=~`${internalInstance:regex}`}`
+Query: `src_conf_client_time_since_last_successful_update_seconds{job=~`(sourcegraph-)?frontend`,instance=~`${internalInstance:regex}`}`
@@ -417,7 +417,7 @@ To see this panel, visit `/-/debug/grafana/d/frontend/frontend?viewPanel=100301`
 Technical details
-Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{instance=~`${internalInstance:regex}`}[1m]))`
+Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~`(sourcegraph-)?frontend`,instance=~`${internalInstance:regex}`}[1m]))`
@@ -7337,7 +7337,7 @@ To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=10110
 Technical details
-Query: `src_conf_client_time_since_last_successful_update_seconds{instance=~`${shard:regex}`}`
+Query: `src_conf_client_time_since_last_successful_update_seconds{job=~`.*gitserver`,instance=~`${shard:regex}`}`
@@ -7356,7 +7356,7 @@ To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=10110
 Technical details
-Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{instance=~`${shard:regex}`}[1m]))`
+Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~`.*gitserver`,instance=~`${shard:regex}`}[1m]))`
@@ -10151,7 +10151,7 @@ To see this panel, visit `/-/debug/grafana/d/worker/worker?viewPanel=100000` on
 Technical details
-Query: `sum by (job_name) (src_worker_jobs{job="worker"})`
+Query: `sum by (job_name) (src_worker_jobs{job=~"^worker.*"})`
@@ -10170,7 +10170,7 @@ To see this panel, visit `/-/debug/grafana/d/worker/worker?viewPanel=100010` on
 Technical details
-Query: `sum (src_worker_jobs{job="worker", job_name="codeintel-upload-janitor"})`
+Query: `sum (src_worker_jobs{job=~"^worker.*", job_name="codeintel-upload-janitor"})`
@@ -10189,7 +10189,7 @@ To see this panel, visit `/-/debug/grafana/d/worker/worker?viewPanel=100011` on
 Technical details
-Query: `sum (src_worker_jobs{job="worker", job_name="codeintel-commitgraph-updater"})`
+Query: `sum (src_worker_jobs{job=~"^worker.*", job_name="codeintel-commitgraph-updater"})`
@@ -10208,7 +10208,7 @@ To see this panel, visit `/-/debug/grafana/d/worker/worker?viewPanel=100012` on
 Technical details
-Query: `sum (src_worker_jobs{job="worker", job_name="codeintel-autoindexing-scheduler"})`
+Query: `sum (src_worker_jobs{job=~"^worker.*", job_name="codeintel-autoindexing-scheduler"})`
@@ -13918,7 +13918,7 @@ To see this panel, visit `/-/debug/grafana/d/worker/worker?viewPanel=103800` on
 Technical details
-Query: `src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}`
+Query: `src_conf_client_time_since_last_successful_update_seconds{job=~`^worker.*`,instance=~`${instance:regex}`}`
@@ -13937,7 +13937,7 @@ To see this panel, visit `/-/debug/grafana/d/worker/worker?viewPanel=103801` on
 Technical details
-Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}[1m]))`
+Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~`^worker.*`,instance=~`${instance:regex}`}[1m]))`
@@ -15856,7 +15856,7 @@ To see this panel, visit `/-/debug/grafana/d/repo-updater/repo-updater?viewPanel
 Technical details
-Query: `src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}`
+Query: `src_conf_client_time_since_last_successful_update_seconds{job=~`.*repo-updater`,instance=~`${instance:regex}`}`
@@ -15875,7 +15875,7 @@ To see this panel, visit `/-/debug/grafana/d/repo-updater/repo-updater?viewPanel
 Technical details
-Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}[1m]))`
+Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~`.*repo-updater`,instance=~`${instance:regex}`}[1m]))`
@@ -17442,7 +17442,7 @@ To see this panel, visit `/-/debug/grafana/d/searcher/searcher?viewPanel=100700`
 Technical details
-Query: `src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}`
+Query: `src_conf_client_time_since_last_successful_update_seconds{job=~`.*searcher`,instance=~`${instance:regex}`}`
@@ -17461,7 +17461,7 @@ To see this panel, visit `/-/debug/grafana/d/searcher/searcher?viewPanel=100701`
 Technical details
-Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}[1m]))`
+Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~`.*searcher`,instance=~`${instance:regex}`}[1m]))`
@@ -19221,7 +19221,7 @@ To see this panel, visit `/-/debug/grafana/d/symbols/symbols?viewPanel=100800` o
 Technical details
-Query: `src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}`
+Query: `src_conf_client_time_since_last_successful_update_seconds{job=~`.*symbols`,instance=~`${instance:regex}`}`
@@ -19240,7 +19240,7 @@ To see this panel, visit `/-/debug/grafana/d/symbols/symbols?viewPanel=100801` o
 Technical details
-Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}[1m]))`
+Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~`.*symbols`,instance=~`${instance:regex}`}[1m]))`
@@ -32013,7 +32013,7 @@ To see this panel, visit `/-/debug/grafana/d/embeddings/embeddings?viewPanel=100
 Technical details
-Query: `src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}`
+Query: `src_conf_client_time_since_last_successful_update_seconds{job=~`.*embeddings`,instance=~`${instance:regex}`}`
@@ -32032,7 +32032,7 @@ To see this panel, visit `/-/debug/grafana/d/embeddings/embeddings?viewPanel=100
 Technical details
-Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{instance=~`${instance:regex}`}[1m]))`
+Query: `max(max_over_time(src_conf_client_time_since_last_successful_update_seconds{job=~`.*embeddings`,instance=~`${instance:regex}`}[1m]))`
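Before the Go changes below, a quick sanity check on the job regexes used in the updated queries above. This is an editorial sketch, not part of the patch: the example scrape job names are assumptions based on the commit message and the existing dashboards, and it relies on Prometheus fully anchoring `=~` label matchers (it wraps the regex in `^(?:...)$`).

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pairs of (job filter regex, representative scrape job name) taken from the
	// queries above; the concrete job names are illustrative assumptions.
	cases := []struct{ filter, job string }{
		{`(sourcegraph-)?frontend`, "sourcegraph-frontend"},
		{`.*gitserver`, "gitserver"},
		{`^worker.*`, "worker"},
		{`.*repo-updater`, "repo-updater"},
		{`.*searcher`, "searcher"},
		{`.*symbols`, "symbols"},
		{`.*embeddings`, "embeddings"},
	}
	for _, c := range cases {
		// Mimic Prometheus label matching, which anchors the regex on both ends.
		re := regexp.MustCompile(`^(?:` + c.filter + `)$`)
		fmt.Printf("%-26s matches %-22s: %v\n", c.filter, c.job, re.MatchString(c.job))
	}
}
```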
diff --git a/monitoring/definitions/embeddings.go b/monitoring/definitions/embeddings.go
index 92a682806c2..5dc77b775be 100644
--- a/monitoring/definitions/embeddings.go
+++ b/monitoring/definitions/embeddings.go
@@ -1,6 +1,8 @@
 package definitions
 
 import (
+	"fmt"
+
 	"github.com/sourcegraph/sourcegraph/monitoring/definitions/shared"
 	"github.com/sourcegraph/sourcegraph/monitoring/monitoring"
 )
@@ -8,6 +10,8 @@ import (
 func Embeddings() *monitoring.Dashboard {
 	const containerName = "embeddings"
 
+	scrapeJobRegex := fmt.Sprintf(".*%s", containerName)
+
 	return &monitoring.Dashboard{
 		Name:  "embeddings",
 		Title: "Embeddings",
@@ -28,6 +32,7 @@ func Embeddings() *monitoring.Dashboard {
 			shared.NewSiteConfigurationClientMetricsGroup(shared.SiteConfigurationMetricsOptions{
 				HumanServiceName:    "embeddings",
 				InstanceFilterRegex: `${instance:regex}`,
+				JobFilterRegex:      scrapeJobRegex,
 			}, monitoring.ObservableOwnerInfraOrg),
 			shared.NewDatabaseConnectionsMonitoringGroup(containerName, monitoring.ObservableOwnerCody),
 			shared.NewContainerMonitoringGroup(containerName, monitoring.ObservableOwnerCody, nil),
diff --git a/monitoring/definitions/frontend.go b/monitoring/definitions/frontend.go
index d50ace3bbce..a8af197c936 100644
--- a/monitoring/definitions/frontend.go
+++ b/monitoring/definitions/frontend.go
@@ -17,6 +17,8 @@ func Frontend() *monitoring.Dashboard {
 		grpcZoektConfigurationServiceName = "sourcegraph.zoekt.configuration.v1.ZoektConfigurationService"
 		grpcInternalAPIServiceName        = "api.internalapi.v1.ConfigService"
+
+		scrapeJobRegex = `(sourcegraph-)?frontend`
 	)
 
 	var sentinelSamplingIntervals []string
@@ -340,6 +342,7 @@ func Frontend() *monitoring.Dashboard {
 			shared.NewSiteConfigurationClientMetricsGroup(shared.SiteConfigurationMetricsOptions{
 				HumanServiceName:    "frontend",
 				InstanceFilterRegex: `${internalInstance:regex}`,
+				JobFilterRegex:      scrapeJobRegex,
 			}, monitoring.ObservableOwnerInfraOrg),
 
 			shared.CodeIntelligence.NewResolversGroup(containerName),
@@ -493,7 +496,7 @@ func Frontend() *monitoring.Dashboard {
 					{
 						Name:        "99th_percentile_gitserver_duration",
 						Description: "99th percentile successful gitserver query duration over 5m",
-						Query:       `histogram_quantile(0.99, sum by (le,category)(rate(src_gitserver_request_duration_seconds_bucket{job=~"(sourcegraph-)?frontend"}[5m])))`,
+						Query:       fmt.Sprintf(`histogram_quantile(0.99, sum by (le,category)(rate(src_gitserver_request_duration_seconds_bucket{job=~%q}[5m])))`, scrapeJobRegex),
 						Warning:     monitoring.Alert().GreaterOrEqual(20),
 						Panel:       monitoring.Panel().LegendFormat("{{category}}").Unit(monitoring.Seconds),
 						Owner:       monitoring.ObservableOwnerSource,
@@ -502,7 +505,7 @@ func Frontend() *monitoring.Dashboard {
 					{
 						Name:        "gitserver_error_responses",
 						Description: "gitserver error responses every 5m",
-						Query:       `sum by (category)(increase(src_gitserver_request_duration_seconds_count{job=~"(sourcegraph-)?frontend",code!~"2.."}[5m])) / ignoring(code) group_left sum by (category)(increase(src_gitserver_request_duration_seconds_count{job=~"(sourcegraph-)?frontend"}[5m])) * 100`,
+						Query:       fmt.Sprintf(`sum by (category)(increase(src_gitserver_request_duration_seconds_count{job=~%q,code!~"2.."}[5m])) / ignoring(code) group_left sum by (category)(increase(src_gitserver_request_duration_seconds_count{job=~%q}[5m])) * 100`, scrapeJobRegex, scrapeJobRegex),
 						Warning:     monitoring.Alert().GreaterOrEqual(5).For(15 * time.Minute),
 						Panel:       monitoring.Panel().LegendFormat("{{category}}").Unit(monitoring.Percentage),
 						Owner:       monitoring.ObservableOwnerSource,
diff --git a/monitoring/definitions/git_server.go b/monitoring/definitions/git_server.go
index 0c055c9985a..0348d3dcd91 100644
--- a/monitoring/definitions/git_server.go
+++ b/monitoring/definitions/git_server.go
@@ -14,6 +14,8 @@ func GitServer() *monitoring.Dashboard {
 		grpcServiceName = "gitserver.v1.GitserverService"
 	)
 
+	scrapeJobRegex := fmt.Sprintf(".*%s", containerName)
+
 	gitserverHighMemoryNoAlertTransformer := func(observable shared.Observable) shared.Observable {
 		return observable.WithNoAlerts(`Git Server is expected to use up all the memory it is provided.`)
 	}
@@ -568,6 +570,7 @@ func GitServer() *monitoring.Dashboard {
 			shared.NewSiteConfigurationClientMetricsGroup(shared.SiteConfigurationMetricsOptions{
 				HumanServiceName:    "gitserver",
 				InstanceFilterRegex: `${shard:regex}`,
+				JobFilterRegex:      scrapeJobRegex,
 			}, monitoring.ObservableOwnerInfraOrg),
 
 			shared.CodeIntelligence.NewCoursierGroup(containerName),
diff --git a/monitoring/definitions/repo_updater.go b/monitoring/definitions/repo_updater.go
index 46759e19260..bd6180fe0ed 100644
--- a/monitoring/definitions/repo_updater.go
+++ b/monitoring/definitions/repo_updater.go
@@ -17,6 +17,8 @@ func RepoUpdater() *monitoring.Dashboard {
 		syncDurationThreshold = 9 * time.Hour
 	)
 
+	scrapeJobRegex := fmt.Sprintf(".*%s", containerName)
+
 	containerMonitoringOptions := &shared.ContainerMonitoringGroupOptions{
 		MemoryUsage: func(observable shared.Observable) shared.Observable {
 			return observable.WithWarning(nil).WithCritical(monitoring.Alert().GreaterOrEqual(90).For(10 * time.Minute))
 		},
@@ -432,6 +434,7 @@ func RepoUpdater() *monitoring.Dashboard {
 			shared.NewSiteConfigurationClientMetricsGroup(shared.SiteConfigurationMetricsOptions{
 				HumanServiceName:    "repo_updater",
 				InstanceFilterRegex: `${instance:regex}`,
+				JobFilterRegex:      scrapeJobRegex,
 			}, monitoring.ObservableOwnerInfraOrg),
 			shared.HTTP.NewHandlersGroup(containerName),
 			shared.NewDatabaseConnectionsMonitoringGroup(containerName, monitoring.ObservableOwnerSource),
diff --git a/monitoring/definitions/searcher.go b/monitoring/definitions/searcher.go
index b751b8275f3..9414afda47f 100644
--- a/monitoring/definitions/searcher.go
+++ b/monitoring/definitions/searcher.go
@@ -15,6 +15,8 @@ func Searcher() *monitoring.Dashboard {
 		grpcServiceName = "searcher.v1.SearcherService"
 	)
 
+	scrapeJobRegex := fmt.Sprintf(".*%s", containerName)
+
 	grpcMethodVariable := shared.GRPCMethodVariable("searcher", grpcServiceName)
 
 	// instanceSelector is a helper for inserting the instance selector.
@@ -253,6 +255,7 @@ regularly above 0 it is a sign for further investigation.`,
 			shared.NewSiteConfigurationClientMetricsGroup(shared.SiteConfigurationMetricsOptions{
 				HumanServiceName:    "searcher",
 				InstanceFilterRegex: `${instance:regex}`,
+				JobFilterRegex:      scrapeJobRegex,
 			}, monitoring.ObservableOwnerInfraOrg),
 			shared.NewDatabaseConnectionsMonitoringGroup(containerName, monitoring.ObservableOwnerInfraOrg),
 			shared.NewContainerMonitoringGroup(containerName, monitoring.ObservableOwnerSearchCore, nil),
diff --git a/monitoring/definitions/shared/site_configuration.go b/monitoring/definitions/shared/site_configuration.go
index 869cef9e64e..159116d1ef0 100644
--- a/monitoring/definitions/shared/site_configuration.go
+++ b/monitoring/definitions/shared/site_configuration.go
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/iancoleman/strcase"
+
 	"github.com/sourcegraph/sourcegraph/monitoring/monitoring"
 )
 
@@ -20,6 +21,12 @@ type SiteConfigurationMetricsOptions struct {
 	//
 	// Example: (gitserver-0 | gitserver-1)
 	InstanceFilterRegex string
+
+	// JobFilterRegex is the PromQL regex that's used to filter the
+	// site configuration client metrics to only those emitted by the Prometheus scrape job(s) that we're interested in.
+	//
+	// Example: `.*gitserver`
+	JobFilterRegex string
 }
 
 // NewSiteConfigurationClientMetricsGroup creates a group containing site configuration fetching latency statistics for the service
@@ -27,6 +34,8 @@ type SiteConfigurationMetricsOptions struct {
 func NewSiteConfigurationClientMetricsGroup(opts SiteConfigurationMetricsOptions, owner monitoring.ObservableOwner) monitoring.Group {
 	opts.HumanServiceName = strcase.ToSnake(opts.HumanServiceName)
 
+	jobFilter := fmt.Sprintf("job=~`%s`", opts.JobFilterRegex)
+
 	metric := func(base string, labelFilters ...string) string {
 		metric := base
 
@@ -49,7 +58,7 @@ func NewSiteConfigurationClientMetricsGroup(opts SiteConfigurationMetricsOptions
 			{
 				Name:        fmt.Sprintf("%s_site_configuration_duration_since_last_successful_update_by_instance", opts.HumanServiceName),
 				Description: "duration since last successful site configuration update (by instance)",
-				Query:       metric("src_conf_client_time_since_last_successful_update_seconds"),
+				Query:       metric("src_conf_client_time_since_last_successful_update_seconds", jobFilter),
 				Panel:       monitoring.Panel().LegendFormat("{{instance}}").Unit(monitoring.Seconds),
 				Owner:       owner,
 				NoAlert:     true,
@@ -58,7 +67,7 @@ func NewSiteConfigurationClientMetricsGroup(opts SiteConfigurationMetricsOptions
 			{
 				Name:        fmt.Sprintf("%s_site_configuration_duration_since_last_successful_update_by_instance", opts.HumanServiceName),
 				Description: fmt.Sprintf("maximum duration since last successful site configuration update (all %q instances)", opts.HumanServiceName),
-				Query:       fmt.Sprintf("max(max_over_time(%s[1m]))", metric("src_conf_client_time_since_last_successful_update_seconds")),
+				Query:       fmt.Sprintf("max(max_over_time(%s[1m]))", metric("src_conf_client_time_since_last_successful_update_seconds", jobFilter)),
 				Panel:       monitoring.Panel().Unit(monitoring.Seconds),
 				Owner:       owner,
 				Critical:    monitoring.Alert().GreaterOrEqual((5 * time.Minute).Seconds()),
diff --git a/monitoring/definitions/symbols.go b/monitoring/definitions/symbols.go
index dd9e63feca2..29a31310c43 100644
--- a/monitoring/definitions/symbols.go
+++ b/monitoring/definitions/symbols.go
@@ -13,6 +13,8 @@ func Symbols() *monitoring.Dashboard {
 		grpcServiceName = "symbols.v1.SymbolsService"
 	)
 
+	scrapeJobRegex := fmt.Sprintf(".*%s", containerName)
+
 	grpcMethodVariable := shared.GRPCMethodVariable("symbols", grpcServiceName)
 
 	return &monitoring.Dashboard{
@@ -70,6 +72,7 @@ func Symbols() *monitoring.Dashboard {
 			shared.NewSiteConfigurationClientMetricsGroup(shared.SiteConfigurationMetricsOptions{
 				HumanServiceName:    "symbols",
 				InstanceFilterRegex: `${instance:regex}`,
+				JobFilterRegex:      scrapeJobRegex,
 			}, monitoring.ObservableOwnerInfraOrg),
 			shared.NewDatabaseConnectionsMonitoringGroup(containerName, monitoring.ObservableOwnerInfraOrg),
 			shared.NewContainerMonitoringGroup(containerName, monitoring.ObservableOwnerCodeIntel, nil),
diff --git a/monitoring/definitions/worker.go b/monitoring/definitions/worker.go
index 4f865efad0a..df9d4c6073d 100644
--- a/monitoring/definitions/worker.go
+++ b/monitoring/definitions/worker.go
@@ -11,6 +11,8 @@ import (
 func Worker() *monitoring.Dashboard {
 	const containerName = "worker"
 
+	scrapeJobRegex := fmt.Sprintf("^%s.*", containerName)
+
 	workerJobs := []struct {
 		Name  string
 		Owner monitoring.ObservableOwner
@@ -25,7 +27,7 @@ func Worker() *monitoring.Dashboard {
 		activeJobObservables = append(activeJobObservables, monitoring.Observable{
 			Name:          fmt.Sprintf("worker_job_%s_count", job.Name),
 			Description:   fmt.Sprintf("number of worker instances running the %s job", job.Name),
-			Query:         fmt.Sprintf(`sum (src_worker_jobs{job="worker", job_name="%s"})`, job.Name),
+			Query:         fmt.Sprintf(`sum (src_worker_jobs{job=~%q, job_name="%s"})`, scrapeJobRegex, job.Name),
 			Panel:         monitoring.Panel().LegendFormat(fmt.Sprintf("instances running %s", job.Name)),
 			DataMustExist: true,
 			Warning:       monitoring.Alert().Less(1).For(1 * time.Minute),
@@ -66,7 +68,7 @@ func Worker() *monitoring.Dashboard {
 					{
 						Name:        "worker_job_count",
 						Description: "number of worker instances running each job",
-						Query:       `sum by (job_name) (src_worker_jobs{job="worker"})`,
+						Query:       fmt.Sprintf(`sum by (job_name) (src_worker_jobs{job=~%q})`, scrapeJobRegex),
 						Panel:       monitoring.Panel().LegendFormat("instances running {{job_name}}"),
 						NoAlert:     true,
 						Interpretation: `
@@ -245,7 +247,7 @@ func Worker() *monitoring.Dashboard {
 						Name:        "insights_queue_unutilized_size",
 						Description: "insights queue size that is not utilized (not processing)",
 						Owner:       monitoring.ObservableOwnerCodeInsights,
-						Query:       "max(src_query_runner_worker_total{job=~\"^worker.*\"}) > 0 and on(job) sum by (op)(increase(src_workerutil_dbworker_store_insights_query_runner_jobs_store_total{job=~\"^worker.*\",op=\"Dequeue\"}[5m])) < 1",
+						Query:       fmt.Sprintf("max(src_query_runner_worker_total{job=~%q}) > 0 and on(job) sum by (op)(increase(src_workerutil_dbworker_store_insights_query_runner_jobs_store_total{job=~%q,op=\"Dequeue\"}[5m])) < 1", scrapeJobRegex, scrapeJobRegex),
 						DataMustExist: false,
 						Warning:       monitoring.Alert().Greater(0.0).For(time.Minute * 30),
 						NextSteps:     "Verify code insights worker job has successfully started. Restart worker service and monitoring startup logs, looking for worker panics.",
@@ -270,6 +272,7 @@ func Worker() *monitoring.Dashboard {
 			shared.NewSiteConfigurationClientMetricsGroup(shared.SiteConfigurationMetricsOptions{
 				HumanServiceName:    "worker",
 				InstanceFilterRegex: `${instance:regex}`,
+				JobFilterRegex:      scrapeJobRegex,
 			}, monitoring.ObservableOwnerInfraOrg),
 		},
 	}
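As a closing aside (not part of the patch), here is a minimal sketch of the selector-composition pattern the shared site-configuration group now follows: the job filter and the instance filter are rendered as PromQL label matchers and appended to the metric name, and the alert query wraps that selector in `max(max_over_time(...[1m]))`. The helper name `withLabelFilters` is hypothetical; the real logic is the `metric` closure shown in the site_configuration.go hunk above.

```go
package main

import (
	"fmt"
	"strings"
)

// withLabelFilters is a hypothetical stand-in for the metric() closure in
// site_configuration.go: it appends any non-empty label filters to a metric name.
func withLabelFilters(metric string, filters ...string) string {
	var kept []string
	for _, f := range filters {
		if f != "" {
			kept = append(kept, f)
		}
	}
	if len(kept) == 0 {
		return metric
	}
	return fmt.Sprintf("%s{%s}", metric, strings.Join(kept, ","))
}

func main() {
	// Values mirroring the gitserver dashboard: JobFilterRegex and InstanceFilterRegex
	// become label matchers on the site-configuration metric.
	jobFilter := "job=~`.*gitserver`"
	instanceFilter := "instance=~`${shard:regex}`"

	selector := withLabelFilters("src_conf_client_time_since_last_successful_update_seconds", jobFilter, instanceFilter)

	// Per-instance panel query, and the aggregated form used by the critical alert.
	fmt.Println(selector)
	fmt.Printf("max(max_over_time(%s[1m]))\n", selector)
}
```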