From 1ae6cc6bfd6993cd99ae92fd22f6b57f2c3bf3e1 Mon Sep 17 00:00:00 2001
From: William Bezuidenhout
Date: Wed, 18 Oct 2023 16:29:08 +0100
Subject: [PATCH] logger: update log lib and remove use of description (#57690)

* log: remove use of description parameter in Scoped
* temporarily point to sglog branch
* bazel configure + gazelle
* remove additional use of description param
* use latest versions of zoekt, log, mountinfo
* go.mod
---
 cmd/cody-gateway/internal/actor/actor.go | 2 +- .../internal/actor/dotcomuser/dotcomuser.go | 2 +- .../productsubscription.go | 2 +- cmd/cody-gateway/internal/actor/source.go | 6 +-- cmd/cody-gateway/internal/events/buffered.go | 2 +- cmd/cody-gateway/internal/events/events.go | 2 +- .../internal/httpapi/completions/upstream.go | 2 +- .../internal/httpapi/diagnostics.go | 2 +- .../internal/httpapi/embeddings/handler.go | 2 +- .../internal/notify/rate_limit.go | 2 +- cmd/cody-gateway/shared/main.go | 6 +-- .../internal/apiclient/files/client.go | 2 +- .../internal/apiclient/queue/client.go | 2 +- cmd/executor/internal/run/run.go | 4 +- cmd/executor/internal/run/testvm.go | 2 +- cmd/executor/internal/run/util.go | 2 +- cmd/executor/internal/worker/runner/docker.go | 2 +- .../internal/worker/runner/firecracker.go | 2 +- .../internal/worker/runner/kubernetes.go | 2 +- cmd/executor/internal/worker/runner/shell.go | 2 +- cmd/executor/internal/worker/worker.go | 6 +-- cmd/executor/main.go | 2 +- cmd/frontend/auth/user.go | 2 +- cmd/frontend/backend/external_services.go | 16 +++---- cmd/frontend/backend/inventory.go | 2 +- cmd/frontend/backend/repos.go | 2 +- cmd/frontend/backend/user_emails.go | 10 ++-- cmd/frontend/codyapp/update_check_handler.go | 2 +- cmd/frontend/graphqlbackend/access_tokens.go | 4 +- .../graphqlbackend/external_service.go | 8 ++-- .../graphqlbackend/external_services.go | 8 ++-- cmd/frontend/graphqlbackend/git_commit.go | 2 +- cmd/frontend/graphqlbackend/graphqlbackend.go | 4 +- .../graphqlbackend/perforce_changelist.go | 2 +- cmd/frontend/graphqlbackend/repositories.go | 2 +- cmd/frontend/graphqlbackend/repository.go | 2 +- cmd/frontend/graphqlbackend/search.go | 2 +- cmd/frontend/graphqlbackend/search_results.go | 4 +- .../search_results_stats_languages.go | 2 +- .../graphqlbackend/send_test_email.go | 2 +- .../graphqlbackend/settings_cascade.go | 4 +- cmd/frontend/graphqlbackend/site.go | 2 +- cmd/frontend/graphqlbackend/site_admin.go | 2 +- .../graphqlbackend/status_messages.go | 2 +- cmd/frontend/graphqlbackend/user.go | 8 ++-- cmd/frontend/graphqlbackend/user_emails.go | 2 +- cmd/frontend/graphqlbackend/users_create.go | 2 +- .../users_randomize_password.go | 2 +- cmd/frontend/graphqlbackend/webhook_logs.go | 2 +- cmd/frontend/internal/app/app.go | 2 +- cmd/frontend/internal/app/debug.go | 4 +- cmd/frontend/internal/app/editor.go | 2 +- .../internal/app/jscontext/jscontext.go | 2 +- .../internal/app/otlpadapter/adapter.go | 2 +- cmd/frontend/internal/app/resolvers/app.go | 2 +- cmd/frontend/internal/app/sign_out.go | 2 +- cmd/frontend/internal/app/ui/handlers.go | 2 +- cmd/frontend/internal/app/ui/help.go | 2 +- cmd/frontend/internal/app/ui/landing.go | 2 +- cmd/frontend/internal/app/ui/router.go | 4 +- cmd/frontend/internal/app/verify_email.go | 2 +- .../internal/auth/azureoauth/provider.go | 2 +- .../auth/bitbucketcloudoauth/config.go | 2 +- .../internal/auth/githubappauth/init.go | 2 +- .../internal/auth/githuboauth/config.go | 2 +- .../internal/auth/githuboauth/session.go | 2 +- .../internal/auth/gitlaboauth/config.go | 2 +-
.../internal/auth/gitlaboauth/login.go | 2 +- .../internal/auth/httpheader/config.go | 2 +- cmd/frontend/internal/auth/init.go | 4 +- .../internal/auth/oauth/middleware.go | 2 +- .../internal/auth/openidconnect/config.go | 2 +- cmd/frontend/internal/auth/saml/config.go | 2 +- .../auth/sourcegraphoperator/config.go | 2 +- .../auth/sourcegraphoperator/middleware.go | 2 +- cmd/frontend/internal/authz/init.go | 2 +- .../internal/authz/resolvers/resolver.go | 2 +- .../internal/batches/httpapi/file_handler.go | 2 +- cmd/frontend/internal/batches/init.go | 2 +- .../bg/delete_old_event_logs_in_postgres.go | 4 +- .../internal/bg/update_permissions.go | 2 +- cmd/frontend/internal/cli/config.go | 12 ++--- cmd/frontend/internal/cli/http.go | 4 +- cmd/frontend/internal/cli/serve_cmd.go | 2 +- cmd/frontend/internal/codeintel/init.go | 2 +- cmd/frontend/internal/codemonitors/init.go | 2 +- cmd/frontend/internal/completions/init.go | 2 +- cmd/frontend/internal/compute/init.go | 2 +- cmd/frontend/internal/contentlibrary/init.go | 2 +- cmd/frontend/internal/dotcom/init.go | 4 +- .../productsubscription/license_anomaly.go | 2 +- .../license_check_handler.go | 4 +- .../productsubscription/license_expiration.go | 2 +- .../internal/executorqueue/handler/handler.go | 5 +- .../executorqueue/handler/multihandler.go | 8 ++-- cmd/frontend/internal/executorqueue/init.go | 2 +- .../executorqueue/queues/batches/queue.go | 2 +- cmd/frontend/internal/httpapi/auth.go | 2 +- cmd/frontend/internal/httpapi/httpapi.go | 8 ++-- .../internal/httpapi/releasecache/cache.go | 2 +- .../internal/httpapi/releasecache/http.go | 4 +- cmd/frontend/internal/httpapi/repo_refresh.go | 2 +- cmd/frontend/internal/httpapi/src_cli.go | 2 +- .../httpapi/webhookhandlers/handlers.go | 2 +- .../insights/resolvers/admin_resolver.go | 2 +- .../resolvers/aggregates_resolvers.go | 2 +- .../resolvers/insight_series_resolver.go | 4 +- .../resolvers/live_preview_resolvers.go | 2 +- .../internal/insights/resolvers/resolver.go | 6 +-- .../resolvers/scoped_insight_resolvers.go | 2 +- cmd/frontend/internal/licensing/init/init.go | 2 +- cmd/frontend/internal/own/init.go | 2 +- cmd/frontend/internal/rbac/init.go | 2 +- .../internal/repos/webhooks/handlers.go | 8 ++-- cmd/frontend/internal/search/search.go | 2 +- cmd/frontend/internal/telemetry/init.go | 2 +- cmd/frontend/shared/shared.go | 2 +- cmd/frontend/webhooks/github_webhooks.go | 2 +- cmd/frontend/webhooks/webhooks.go | 2 +- cmd/gitserver/internal/cleanup.go | 6 +-- cmd/gitserver/internal/executil/executil.go | 2 +- cmd/gitserver/internal/git/cleanup.go | 2 +- cmd/gitserver/internal/git/git.go | 2 +- cmd/gitserver/internal/gitserverfs/initfs.go | 2 +- cmd/gitserver/internal/gitservice.go | 2 +- .../internal/integration_tests/test_utils.go | 4 +- cmd/gitserver/internal/p4exec.go | 2 +- cmd/gitserver/internal/p4exec_test.go | 2 +- cmd/gitserver/internal/patch.go | 4 +- cmd/gitserver/internal/perforce/perforce.go | 4 +- cmd/gitserver/internal/server.go | 26 +++++----- cmd/gitserver/internal/servermetrics.go | 2 +- cmd/gitserver/internal/sshagent/ssh_agent.go | 2 +- .../internal/vcssyncer/customfetch.go | 2 +- .../internal/vcssyncer/go_modules.go | 2 +- .../internal/vcssyncer/jvm_packages.go | 6 +-- .../internal/vcssyncer/npm_packages.go | 4 +- .../internal/vcssyncer/python_packages.go | 4 +- .../internal/vcssyncer/ruby_packages.go | 2 +- .../internal/vcssyncer/rust_packages.go | 2 +- cmd/gitserver/shared/shared.go | 10 ++-- cmd/loadtest/main.go | 2 +- cmd/migrator/main.go | 2 +- cmd/pings/shared/metrics.go 
| 2 +- .../shared/shared.go | 4 +- .../internal/authz/perms_syncer.go | 8 ++-- .../internal/authz/perms_syncer_worker.go | 4 +- cmd/repo-updater/shared/main.go | 14 +++--- cmd/searcher/internal/search/mmap.go | 2 +- cmd/searcher/internal/search/search.go | 2 +- cmd/searcher/shared/shared.go | 2 +- cmd/server/shared/observability.go | 2 +- cmd/server/shared/shared.go | 2 +- cmd/symbols/internal/api/handler.go | 6 +-- cmd/symbols/parser/parser.go | 2 +- cmd/symbols/shared/setup.go | 4 +- cmd/symbols/shared/sqlite.go | 2 +- .../diagnosticsserver/diagnosticsserver.go | 2 +- .../internal/server/server.go | 2 +- cmd/telemetry-gateway/shared/main.go | 4 +- .../batches/bulk_operation_processor_job.go | 4 +- cmd/worker/internal/batches/dbstore.go | 10 ++-- cmd/worker/internal/batches/janitor_job.go | 10 ++-- cmd/worker/internal/batches/reconciler_job.go | 4 +- .../workers/batch_spec_resolution_worker.go | 2 +- .../batches/workspace_resolver_job.go | 2 +- .../internal/codygateway/usageworker.go | 2 +- .../executors/multiqueue_cache_cleaner.go | 4 +- cmd/worker/internal/githubapps/job.go | 2 +- cmd/worker/internal/licensecheck/check.go | 2 +- cmd/worker/internal/outboundwebhooks/job.go | 2 +- .../permissions/bitbucket_projects.go | 10 ++-- cmd/worker/internal/ratelimit/job.go | 2 +- .../internal/search/exhaustive_search.go | 2 +- .../internal/search/exhaustive_search_repo.go | 2 +- .../search/exhaustive_search_repo_revision.go | 2 +- cmd/worker/internal/search/job.go | 2 +- .../telemetrygatewayexporter/exporter.go | 2 +- cmd/worker/shared/main.go | 4 +- deps.bzl | 12 ++--- dev/build-tracker/build/build.go | 2 +- dev/build-tracker/main.go | 4 +- dev/build-tracker/notify/slack.go | 2 +- dev/ci/gen-pipeline.go | 2 +- dev/ci/integration/executors/tester/main.go | 2 +- dev/ci/internal/ci/wolfi_operations.go | 2 +- dev/deployment-notifier/main.go | 2 +- .../app-discover-repos/app-discover-repos.go | 2 +- dev/internal/cmd/search-plan/search-plan.go | 2 +- dev/scaletesting/bulkreposettings/main.go | 4 +- dev/scaletesting/codehostcopy/bitbucket.go | 2 +- dev/scaletesting/codehostcopy/dummy.go | 2 +- dev/scaletesting/codehostcopy/main.go | 6 +-- dev/sg/internal/migration/squash.go | 2 +- dev/sg/internal/rfc/rfc.go | 2 +- dev/sg/sg_audit.go | 2 +- dev/sg/sg_db.go | 10 ++-- dev/sg/sg_insights.go | 2 +- dev/sg/sg_migration.go | 2 +- dev/sg/sg_page.go | 2 +- .../prometheus/cmd/prom-wrapper/main.go | 4 +- .../prometheus/cmd/prom-wrapper/status.go | 2 +- go.mod | 6 +-- go.sum | 12 ++--- internal/adminanalytics/cache.go | 2 +- internal/api/internalapi/client.go | 2 +- internal/audit/integration/cmd/main.go | 2 +- internal/auth/accessrequest/handlers.go | 2 +- internal/auth/userpasswd/handlers.go | 10 ++-- internal/auth/userpasswd/reset_password.go | 4 +- internal/authz/providers/authz.go | 2 +- .../authz/providers/azuredevops/provider.go | 2 +- internal/authz/providers/github/github.go | 4 +- internal/authz/providers/perforce/authz.go | 2 +- .../perforce/cmd/scanprotects/main.go | 2 +- internal/authz/providers/perforce/protects.go | 8 ++-- internal/batches/background.go | 2 +- internal/batches/reconciler/executor.go | 2 +- internal/batches/service/service.go | 4 +- .../batches/service/workspace_resolver.go | 2 +- internal/batches/sources/sources.go | 2 +- internal/batches/state/state.go | 2 +- .../store/worker_workspace_execution.go | 2 +- internal/batches/syncer/syncer.go | 2 +- .../background/dependencies/job_resetters.go | 4 +- .../autoindexing/internal/background/init.go | 4 +- 
.../autoindexing/internal/inference/infer.go | 2 +- .../autoindexing/internal/inference/init.go | 2 +- .../internal/jobselector/job_selector.go | 2 +- .../autoindexing/internal/store/store.go | 2 +- internal/codeintel/autoindexing/service.go | 2 +- internal/codeintel/codenav/service.go | 2 +- .../codeintel/context/internal/store/store.go | 2 +- .../policies/internal/store/store.go | 2 +- .../codeintel/ranking/internal/store/store.go | 2 +- .../internal/background/downloader/job.go | 4 +- .../sentinel/internal/store/store.go | 2 +- .../background/processor/job_resetters.go | 2 +- .../uploads/internal/store/cleanup_test.go | 2 +- .../codeintel/uploads/internal/store/store.go | 2 +- .../uploads/transport/http/auth/github.go | 2 +- .../uploads/transport/http/handler.go | 2 +- .../codeintel/uploads/transport/http/init.go | 1 - .../codemonitors/background/background.go | 4 +- internal/codemonitors/background/metrics.go | 4 +- internal/codemonitors/background/workers.go | 6 +-- internal/completions/client/observe.go | 2 +- internal/completions/httpapi/chat.go | 2 +- .../completions/httpapi/codecompletion.go | 2 +- internal/compute/output_command.go | 2 +- internal/compute/replace_command.go | 2 +- internal/conf/client.go | 2 +- internal/conf/conf.go | 2 +- internal/database/assigned_owners.go | 2 +- internal/database/assigned_teams.go | 2 +- internal/database/basestore/handle.go | 4 +- internal/database/batch/batch.go | 2 +- internal/database/code_monitors.go | 2 +- internal/database/conf.go | 2 +- internal/database/database.go | 6 +-- internal/database/dbconn/rds/rds.go | 4 +- internal/database/migration/cliutil/addlog.go | 2 +- .../database/migration/store/extractor.go | 2 +- internal/database/recent_view_signal.go | 2 +- internal/database/security_event_logs.go | 2 +- internal/debugserver/grpcui.go | 2 +- internal/endpoint/endpoint.go | 4 +- internal/endpoint/k8s.go | 4 +- internal/extsvc/azuredevops/client.go | 4 +- internal/extsvc/bitbucketcloud/client.go | 2 +- internal/extsvc/bitbucketserver/client.go | 2 +- internal/extsvc/crates/client.go | 2 +- internal/extsvc/gerrit/client.go | 2 +- internal/extsvc/github/globallock.go | 2 +- internal/extsvc/github/v3.go | 4 +- internal/extsvc/github/v4.go | 18 +++---- internal/extsvc/gitlab/client.go | 8 ++-- internal/extsvc/gomodproxy/client.go | 2 +- internal/extsvc/npm/npm.go | 2 +- internal/extsvc/npm/observability.go | 2 +- internal/extsvc/pagure/client.go | 2 +- internal/extsvc/pypi/client.go | 2 +- internal/extsvc/rubygems/client.go | 2 +- internal/extsvc/versions/sync.go | 2 +- internal/gitserver/addrs.go | 4 +- internal/gitserver/client.go | 2 +- internal/gitserver/git_command.go | 2 +- internal/gitserver/observability.go | 2 +- internal/gitserver/search/search.go | 2 +- internal/goroutine/periodic.go | 2 +- internal/grpc/internalerrs/logging.go | 26 +++++----- internal/highlight/highlight.go | 2 +- internal/httpcli/client.go | 2 +- internal/httpcli/redis_logger_middleware.go | 2 +- internal/insights/background/background.go | 16 +++---- internal/insights/background/data_prune.go | 2 +- internal/insights/background/license_check.go | 2 +- .../insights/background/limiter/historical.go | 2 +- .../background/limiter/search_query.go | 2 +- .../background/pings/insights_ping_emitter.go | 2 +- .../insights/background/queryrunner/search.go | 6 +-- .../insights/background/queryrunner/worker.go | 2 +- internal/insights/pipeline/backfill.go | 2 +- .../insights/query/capture_group_executor.go | 2 +- internal/insights/query/compute_executor.go | 2 +- 
.../insights/query/streaming/search_client.go | 2 +- .../query/streaming_query_executor.go | 2 +- .../backfill_state_inprogress_handler.go | 4 +- .../scheduler/backfill_state_new_handler.go | 2 +- internal/insights/store/permissions.go | 2 +- internal/luasandbox/init.go | 2 +- internal/metrics/metrics.go | 2 +- internal/observation/context.go | 5 +- .../batches/extsvc_webhook_migrator.go | 2 +- .../migrations/batches/ssh_migrator.go | 2 +- .../migrations/insights/migrator.go | 2 +- internal/oobmigration/runner.go | 2 +- internal/own/background/background.go | 2 +- internal/pubsub/topic.go | 2 +- internal/ratelimit/globallimiter.go | 2 +- internal/repos/github.go | 4 +- internal/repos/scheduler/scheduler.go | 6 +-- internal/repos/sources.go | 18 +++---- internal/repos/sync_worker.go | 6 +-- internal/repos/syncer.go | 4 +- internal/repoupdater/client.go | 2 +- internal/rockskip/server.go | 2 +- internal/scim/user_service.go | 6 +-- internal/search/backend/metered_searcher.go | 2 +- internal/search/backend/zoekt.go | 2 +- internal/search/env.go | 2 +- internal/search/exhaustive/service/service.go | 2 +- internal/search/streaming/client/metadata.go | 2 +- internal/service/svcmain/svcmain.go | 10 ++-- internal/symbols/client.go | 2 +- .../telemetryrecorder/telemetryrecorder.go | 2 +- internal/trace/httptrace.go | 2 +- internal/tracer/tracer.go | 2 +- internal/ttlcache/cache.go | 2 +- internal/updatecheck/client.go | 6 +-- internal/updatecheck/handler.go | 2 +- internal/usagestats/code_insights.go | 2 +- internal/usagestats/search_jobs.go | 2 +- internal/users/update_aggregated_stats_job.go | 2 +- internal/workerutil/dbworker/metrics.go | 2 +- internal/workerutil/worker.go | 2 +- lib/codeintel/tools/lsif-index-tester/main.go | 6 +-- monitoring/command/generate.go | 2 +- monitoring/go.mod | 10 ++-- monitoring/go.sum | 47 ++++++++++++------- monitoring/main.go | 2 +- monitoring/monitoring/generator.go | 10 ++-- 352 files changed, 607 insertions(+), 595 deletions(-) diff --git a/cmd/cody-gateway/internal/actor/actor.go b/cmd/cody-gateway/internal/actor/actor.go index 4e30b79fbac..f37bbaf772e 100644 --- a/cmd/cody-gateway/internal/actor/actor.go +++ b/cmd/cody-gateway/internal/actor/actor.go @@ -189,7 +189,7 @@ func (a *Actor) Limiter( } return &concurrencyLimiter{ - logger: logger.Scoped("concurrency", "concurrency limiter"), + logger: logger.Scoped("concurrency"), actor: a, feature: feature, redis: limiter.NewPrefixRedisStore(fmt.Sprintf("concurrent:%s", featurePrefix), redis), diff --git a/cmd/cody-gateway/internal/actor/dotcomuser/dotcomuser.go b/cmd/cody-gateway/internal/actor/dotcomuser/dotcomuser.go index 09e72d3dcd5..a200c7282b8 100644 --- a/cmd/cody-gateway/internal/actor/dotcomuser/dotcomuser.go +++ b/cmd/cody-gateway/internal/actor/dotcomuser/dotcomuser.go @@ -45,7 +45,7 @@ var _ actor.Source = &Source{} func NewSource(logger log.Logger, cache httpcache.Cache, dotComClient graphql.Client, concurrencyConfig codygateway.ActorConcurrencyLimitConfig) *Source { return &Source{ - log: logger.Scoped("dotcomuser", "dotcom user actor source"), + log: logger.Scoped("dotcomuser"), cache: cache, dotcom: dotComClient, concurrencyConfig: concurrencyConfig, diff --git a/cmd/cody-gateway/internal/actor/productsubscription/productsubscription.go b/cmd/cody-gateway/internal/actor/productsubscription/productsubscription.go index 856610dce19..5a1bcce8b87 100644 --- a/cmd/cody-gateway/internal/actor/productsubscription/productsubscription.go +++ 
b/cmd/cody-gateway/internal/actor/productsubscription/productsubscription.go @@ -56,7 +56,7 @@ var _ actor.SourceSyncer = &Source{} func NewSource(logger log.Logger, cache httpcache.Cache, dotcomClient graphql.Client, internalMode bool, concurrencyConfig codygateway.ActorConcurrencyLimitConfig) *Source { return &Source{ - log: logger.Scoped("productsubscriptions", "product subscription actor source"), + log: logger.Scoped("productsubscriptions"), cache: cache, dotcom: dotcomClient, diff --git a/cmd/cody-gateway/internal/actor/source.go b/cmd/cody-gateway/internal/actor/source.go index ad40bcca297..3a650396be6 100644 --- a/cmd/cody-gateway/internal/actor/source.go +++ b/cmd/cody-gateway/internal/actor/source.go @@ -150,16 +150,16 @@ func (s *Sources) SyncAll(ctx context.Context, logger log.Logger) error { // at a regular interval. It uses a redsync.Mutex to ensure only one worker is running // at a time. func (s *Sources) Worker(obCtx *observation.Context, rmux *redsync.Mutex, rootInterval time.Duration) goroutine.BackgroundRoutine { - logger := obCtx.Logger.Scoped("sources.worker", "sources background routie") + logger := obCtx.Logger.Scoped("sources.worker") return &redisLockedBackgroundRoutine{ - logger: logger.Scoped("redisLock", "distributed lock layer for sources sync"), + logger: logger.Scoped("redisLock"), rmux: rmux, routine: goroutine.NewPeriodicGoroutine( context.Background(), &sourcesSyncHandler{ - logger: logger.Scoped("handler", "handler for actor sources sync"), + logger: logger.Scoped("handler"), rmux: rmux, sources: s, syncInterval: rootInterval, diff --git a/cmd/cody-gateway/internal/events/buffered.go b/cmd/cody-gateway/internal/events/buffered.go index 93e56fc8a99..9230aaa0ade 100644 --- a/cmd/cody-gateway/internal/events/buffered.go +++ b/cmd/cody-gateway/internal/events/buffered.go @@ -65,7 +65,7 @@ func defaultWorkers(bufferSize, workerCount int) int { // goroutine.BackgroundRoutine that must be started. func NewBufferedLogger(logger log.Logger, handler Logger, bufferSize, workerCount int) *BufferedLogger { return &BufferedLogger{ - log: logger.Scoped("bufferedLogger", "buffered events logger"), + log: logger.Scoped("bufferedLogger"), handler: handler, diff --git a/cmd/cody-gateway/internal/events/events.go b/cmd/cody-gateway/internal/events/events.go index 7c89e9fe505..779d768b260 100644 --- a/cmd/cody-gateway/internal/events/events.go +++ b/cmd/cody-gateway/internal/events/events.go @@ -138,7 +138,7 @@ func NewStdoutLogger(logger log.Logger) Logger { // demo tracing in dev. return &instrumentedLogger{ Scope: "stdoutLogger", - Logger: &stdoutLogger{logger: logger.Scoped("events", "event logger")}, + Logger: &stdoutLogger{logger: logger.Scoped("events")}, } } diff --git a/cmd/cody-gateway/internal/httpapi/completions/upstream.go b/cmd/cody-gateway/internal/httpapi/completions/upstream.go index 2ef68c18ce2..c6e181ea2d2 100644 --- a/cmd/cody-gateway/internal/httpapi/completions/upstream.go +++ b/cmd/cody-gateway/internal/httpapi/completions/upstream.go @@ -98,7 +98,7 @@ func makeUpstreamHandler[ReqT UpstreamRequest]( // response. defaultRetryAfterSeconds int, ) http.Handler { - baseLogger = baseLogger.Scoped(upstreamName, fmt.Sprintf("%s upstream handler", upstreamName)). + baseLogger = baseLogger.Scoped(upstreamName). 
With(log.String("upstream.url", upstreamAPIURL)) // Convert allowedModels to the Cody Gateway configuration format with the diff --git a/cmd/cody-gateway/internal/httpapi/diagnostics.go b/cmd/cody-gateway/internal/httpapi/diagnostics.go index 0e2ae5d790d..f93bf2c8805 100644 --- a/cmd/cody-gateway/internal/httpapi/diagnostics.go +++ b/cmd/cody-gateway/internal/httpapi/diagnostics.go @@ -27,7 +27,7 @@ import ( // we do a simple auth on a static secret instead that is uniquely generated per // deployment. func NewDiagnosticsHandler(baseLogger log.Logger, next http.Handler, secret string, sources *actor.Sources) http.Handler { - baseLogger = baseLogger.Scoped("diagnostics", "healthz checks") + baseLogger = baseLogger.Scoped("diagnostics") hasValidSecret := func(l log.Logger, w http.ResponseWriter, r *http.Request) (yes bool) { token, err := authbearer.ExtractBearer(r.Header) diff --git a/cmd/cody-gateway/internal/httpapi/embeddings/handler.go b/cmd/cody-gateway/internal/httpapi/embeddings/handler.go index cef5d38a058..a50515bb32f 100644 --- a/cmd/cody-gateway/internal/httpapi/embeddings/handler.go +++ b/cmd/cody-gateway/internal/httpapi/embeddings/handler.go @@ -34,7 +34,7 @@ func NewHandler( mf ModelFactory, allowedModels []string, ) http.Handler { - baseLogger = baseLogger.Scoped("embeddingshandler", "The HTTP API handler for the embeddings endpoint.") + baseLogger = baseLogger.Scoped("embeddingshandler") return featurelimiter.HandleFeature( baseLogger, diff --git a/cmd/cody-gateway/internal/notify/rate_limit.go b/cmd/cody-gateway/internal/notify/rate_limit.go index 1343e04076c..8818ec7d4a3 100644 --- a/cmd/cody-gateway/internal/notify/rate_limit.go +++ b/cmd/cody-gateway/internal/notify/rate_limit.go @@ -53,7 +53,7 @@ func NewSlackRateLimitNotifier( slackWebhookURL string, slackSender func(ctx context.Context, url string, msg *slack.WebhookMessage) error, ) RateLimitNotifier { - baseLogger = baseLogger.Scoped("slackRateLimitNotifier", "notifications for usage rate limit approaching thresholds") + baseLogger = baseLogger.Scoped("slackRateLimitNotifier") return func(ctx context.Context, actor codygateway.Actor, feature codygateway.Feature, usageRatio float32, ttl time.Duration) { thresholds := actorSourceThresholds.Get(actor.GetSource()) diff --git a/cmd/cody-gateway/shared/main.go b/cmd/cody-gateway/shared/main.go index a206af13060..cc7095fc273 100644 --- a/cmd/cody-gateway/shared/main.go +++ b/cmd/cody-gateway/shared/main.go @@ -112,7 +112,7 @@ func Main(ctx context.Context, obctx *observation.Context, ready service.ReadyFu } authr := &auth.Authenticator{ - Logger: obctx.Logger.Scoped("auth", "authentication middleware"), + Logger: obctx.Logger.Scoped("auth"), EventLogger: eventLogger, Sources: sources, } @@ -257,14 +257,14 @@ func initOpenTelemetry(ctx context.Context, logger log.Logger, config OpenTeleme // Enable tracing, at this point tracing wouldn't have been enabled yet because // we run Cody Gateway without conf which means Sourcegraph tracing is not enabled. 
shutdownTracing, err := maybeEnableTracing(ctx, - logger.Scoped("tracing", "OpenTelemetry tracing"), + logger.Scoped("tracing"), config, res) if err != nil { return nil, errors.Wrap(err, "maybeEnableTracing") } shutdownMetrics, err := maybeEnableMetrics(ctx, - logger.Scoped("metrics", "OpenTelemetry metrics"), + logger.Scoped("metrics"), config, res) if err != nil { return nil, errors.Wrap(err, "maybeEnableMetrics") diff --git a/cmd/executor/internal/apiclient/files/client.go b/cmd/executor/internal/apiclient/files/client.go index 168aafe812c..ff31d132abd 100644 --- a/cmd/executor/internal/apiclient/files/client.go +++ b/cmd/executor/internal/apiclient/files/client.go @@ -28,7 +28,7 @@ var _ files.Store = &Client{} // New creates a new Client based on the provided Options. func New(observationCtx *observation.Context, options apiclient.BaseClientOptions) (*Client, error) { - logger := log.Scoped("executor-api-files-client", "The API client adapter for executors to interact with the Files over HTTP") + logger := log.Scoped("executor-api-files-client") client, err := apiclient.NewBaseClient(logger, options) if err != nil { return nil, err diff --git a/cmd/executor/internal/apiclient/queue/client.go b/cmd/executor/internal/apiclient/queue/client.go index 4a03657a731..f5b0d2b7057 100644 --- a/cmd/executor/internal/apiclient/queue/client.go +++ b/cmd/executor/internal/apiclient/queue/client.go @@ -42,7 +42,7 @@ var _ workerutil.Store[types.Job] = &Client{} var _ cmdlogger.ExecutionLogEntryStore = &Client{} func New(observationCtx *observation.Context, options Options, metricsGatherer prometheus.Gatherer) (*Client, error) { - logger := log.Scoped("executor-api-queue-client", "The API client adapter for executors to use dbworkers over HTTP") + logger := log.Scoped("executor-api-queue-client") client, err := apiclient.NewBaseClient(logger, options.BaseClientOptions) if err != nil { return nil, err diff --git a/cmd/executor/internal/run/run.go b/cmd/executor/internal/run/run.go index e297bdbf4ec..8eb40054e52 100644 --- a/cmd/executor/internal/run/run.go +++ b/cmd/executor/internal/run/run.go @@ -30,7 +30,7 @@ func StandaloneRun(ctx context.Context, runner util.CmdRunner, logger log.Logger return err } - logger = log.Scoped("service", "executor service") + logger = log.Scoped("service") // Initialize tracing/metrics observationCtx := observation.NewContext(logger) @@ -103,7 +103,7 @@ func StandaloneRun(ctx context.Context, runner util.CmdRunner, logger log.Logger if cfg.UseFirecracker { routines = append(routines, janitor.NewOrphanedVMJanitor( - log.Scoped("orphaned-vm-janitor", "deletes VMs from a previous executor instance"), + log.Scoped("orphaned-vm-janitor"), cfg.VMPrefix, nameSet, cfg.CleanupTaskInterval, diff --git a/cmd/executor/internal/run/testvm.go b/cmd/executor/internal/run/testvm.go index 272f90ef66b..3d3c351dcc9 100644 --- a/cmd/executor/internal/run/testvm.go +++ b/cmd/executor/internal/run/testvm.go @@ -71,7 +71,7 @@ func createVM(ctx context.Context, cmdRunner util.CmdRunner, config *config.Conf cmd := &command.RealCommand{ CmdRunner: cmdRunner, - Logger: log.Scoped("executor-test-vm", ""), + Logger: log.Scoped("executor-test-vm"), } firecrackerWorkspace, err := workspace.NewFirecrackerWorkspace( ctx, diff --git a/cmd/executor/internal/run/util.go b/cmd/executor/internal/run/util.go index 8cac2cb62e7..110e5763ed2 100644 --- a/cmd/executor/internal/run/util.go +++ b/cmd/executor/internal/run/util.go @@ -268,7 +268,7 @@ func kubernetesOptions(c *config.Config) 
runner.KubernetesOptions { } func makeWorkerMetrics(queueName string) workerutil.WorkerObservability { - observationCtx := observation.NewContext(log.Scoped("executor_processor", "executor worker processor")) + observationCtx := observation.NewContext(log.Scoped("executor_processor")) return workerutil.NewMetrics(observationCtx, "executor_processor", workerutil.WithSampler(func(job workerutil.Record) bool { return true }), // derived from historic data, ideally we will use spare high-res histograms once they're a reality diff --git a/cmd/executor/internal/worker/runner/docker.go b/cmd/executor/internal/worker/runner/docker.go index c9eb71e5f2f..be5429f7f97 100644 --- a/cmd/executor/internal/worker/runner/docker.go +++ b/cmd/executor/internal/worker/runner/docker.go @@ -43,7 +43,7 @@ func NewDockerRunner( return &dockerRunner{ cmd: cmd, dir: dir, - internalLogger: log.Scoped("docker-runner", ""), + internalLogger: log.Scoped("docker-runner"), commandLogger: logger, options: options, dockerAuthConfig: actualDockerAuthConfig, diff --git a/cmd/executor/internal/worker/runner/firecracker.go b/cmd/executor/internal/worker/runner/firecracker.go index 7cf901f2052..c6ecafdf01a 100644 --- a/cmd/executor/internal/worker/runner/firecracker.go +++ b/cmd/executor/internal/worker/runner/firecracker.go @@ -81,7 +81,7 @@ func NewFirecrackerRunner( cmd: cmd, vmName: vmName, workspaceDevice: workspaceDevice, - internalLogger: log.Scoped("firecracker-runner", ""), + internalLogger: log.Scoped("firecracker-runner"), cmdLogger: logger, options: options, dockerAuthConfig: actualDockerAuthConfig, diff --git a/cmd/executor/internal/worker/runner/kubernetes.go b/cmd/executor/internal/worker/runner/kubernetes.go index 347a407fd42..44cc05d5139 100644 --- a/cmd/executor/internal/worker/runner/kubernetes.go +++ b/cmd/executor/internal/worker/runner/kubernetes.go @@ -47,7 +47,7 @@ func NewKubernetesRunner( options command.KubernetesContainerOptions, ) Runner { return &kubernetesRunner{ - internalLogger: log.Scoped("kubernetes-runner", ""), + internalLogger: log.Scoped("kubernetes-runner"), commandLogger: commandLogger, cmd: cmd, dir: dir, diff --git a/cmd/executor/internal/worker/runner/shell.go b/cmd/executor/internal/worker/runner/shell.go index 364daf281f5..51bf4907435 100644 --- a/cmd/executor/internal/worker/runner/shell.go +++ b/cmd/executor/internal/worker/runner/shell.go @@ -33,7 +33,7 @@ func NewShellRunner( return &shellRunner{ cmd: cmd, dir: dir, - internalLogger: log.Scoped("shell-runner", ""), + internalLogger: log.Scoped("shell-runner"), commandLogger: logger, options: options, } diff --git a/cmd/executor/internal/worker/worker.go b/cmd/executor/internal/worker/worker.go index f6bf4348ef2..375d093d708 100644 --- a/cmd/executor/internal/worker/worker.go +++ b/cmd/executor/internal/worker/worker.go @@ -77,9 +77,9 @@ type Options struct { // NewWorker creates a worker that polls a remote job queue API for work. 
func NewWorker(observationCtx *observation.Context, nameSet *janitor.NameSet, options Options) (goroutine.WaitableBackgroundRoutine, error) { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("worker", "background worker task periodically fetching jobs"), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("worker"), observationCtx) - gatherer := metrics.MakeExecutorMetricsGatherer(log.Scoped("executor-worker.metrics-gatherer", ""), prometheus.DefaultGatherer, options.NodeExporterEndpoint, options.DockerRegistryNodeExporterEndpoint) + gatherer := metrics.MakeExecutorMetricsGatherer(log.Scoped("executor-worker.metrics-gatherer"), prometheus.DefaultGatherer, options.NodeExporterEndpoint, options.DockerRegistryNodeExporterEndpoint) queueClient, err := queue.New(observationCtx, options.QueueOptions, gatherer) if err != nil { return nil, errors.Wrap(err, "building queue worker client") @@ -105,7 +105,7 @@ func NewWorker(observationCtx *observation.Context, nameSet *janitor.NameSet, op cmdRunner := &util.RealCmdRunner{} cmd := &command.RealCommand{ CmdRunner: cmdRunner, - Logger: log.Scoped("executor-worker.command", "command execution"), + Logger: log.Scoped("executor-worker.command"), } // Configure the supported runtimes diff --git a/cmd/executor/main.go b/cmd/executor/main.go index 3d462d786e2..65aec84506a 100644 --- a/cmd/executor/main.go +++ b/cmd/executor/main.go @@ -33,7 +33,7 @@ func main() { }) defer liblog.Sync() - logger := log.Scoped("executor", "the executor service polls the public Sourcegraph frontend API for work to perform") + logger := log.Scoped("executor") runner := &util.RealCmdRunner{} diff --git a/cmd/frontend/auth/user.go b/cmd/frontend/auth/user.go index a78d87fbdb0..80c3e69297f 100644 --- a/cmd/frontend/auth/user.go +++ b/cmd/frontend/auth/user.go @@ -67,7 +67,7 @@ func GetAndSaveUser(ctx context.Context, db database.DB, op GetAndSaveUserOp) (n externalAccountsStore := db.UserExternalAccounts() users := db.Users() - logger := sglog.Scoped("authGetAndSaveUser", "get and save user authenticated by external providers") + logger := sglog.Scoped("authGetAndSaveUser") acct := &extsvc.Account{ AccountSpec: op.ExternalAccount, diff --git a/cmd/frontend/backend/external_services.go b/cmd/frontend/backend/external_services.go index f42d2d663a9..a15f3cd8758 100644 --- a/cmd/frontend/backend/external_services.go +++ b/cmd/frontend/backend/external_services.go @@ -44,14 +44,14 @@ type externalServices struct { func NewExternalServices(logger log.Logger, db database.DB) ExternalServicesService { return &externalServices{ - logger: logger.Scoped("ExternalServices", "service related to external service functionality"), + logger: logger.Scoped("ExternalServices"), db: db, } } func NewMockExternalServices(logger log.Logger, db database.DB, mockSourcer internalrepos.Sourcer) ExternalServicesService { return &externalServices{ - logger: logger.Scoped("ExternalServices", "service related to external service functionality"), + logger: logger.Scoped("ExternalServices"), db: db, mockSourcer: mockSourcer, } @@ -63,7 +63,7 @@ func (e *externalServices) ValidateConnection(ctx context.Context, svc *types.Ex ctx, cancel := context.WithTimeout(ctx, validateConnectionTimeout) defer cancel() - genericSourcer := newGenericSourcer(log.Scoped("externalservice.validateconnection", ""), e.db) + genericSourcer := newGenericSourcer(log.Scoped("externalservice.validateconnection"), e.db) genericSrc, err := genericSourcer(ctx, svc) if err 
!= nil { if ctx.Err() != nil && ctx.Err() == context.DeadlineExceeded { @@ -130,7 +130,7 @@ func (e *externalServices) ListNamespaces(ctx context.Context, externalServiceID return nil, err } } else { - genericSourcer := newGenericSourcer(log.Scoped("externalservice.namespacediscovery", ""), e.db) + genericSourcer := newGenericSourcer(log.Scoped("externalservice.namespacediscovery"), e.db) genericSrc, err = genericSourcer(ctx, externalSvc) if err != nil { return nil, err @@ -199,7 +199,7 @@ func (e *externalServices) DiscoverRepos(ctx context.Context, externalServiceID return nil, err } } else { - genericSourcer := newGenericSourcer(log.Scoped("externalservice.repodiscovery", ""), e.db) + genericSourcer := newGenericSourcer(log.Scoped("externalservice.repodiscovery"), e.db) genericSrc, err = genericSourcer(ctx, externalSvc) if err != nil { return nil, err @@ -260,7 +260,7 @@ func (e *externalServices) ExcludeRepoFromExternalServices(ctx context.Context, return err } - logger := e.logger.Scoped("ExcludeRepoFromExternalServices", "excluding a repo from external service config").With(log.Int32("repoID", int32(repoID))) + logger := e.logger.Scoped("ExcludeRepoFromExternalServices").With(log.Int32("repoID", int32(repoID))) for _, extSvcID := range externalServiceIDs { logger = logger.With(log.Int64("externalServiceID", extSvcID)) } @@ -436,8 +436,8 @@ func schemaContainsExclusion[T comparable](exclusions []*T, newExclusion *T) boo func newGenericSourcer(logger log.Logger, db database.DB) internalrepos.Sourcer { // We use the generic sourcer that doesn't have observability attached to it here because the way externalServiceValidate is set up, // using the regular sourcer will cause a large dump of errors to be logged when it exits ListRepos prematurely. - sourcerLogger := logger.Scoped("repos.Sourcer", "repositories source") - db = database.NewDBWith(sourcerLogger.Scoped("db", "sourcer database"), db) + sourcerLogger := logger.Scoped("repos.Sourcer") + db = database.NewDBWith(sourcerLogger.Scoped("db"), db) dependenciesService := dependencies.NewService(observation.NewContext(logger), db) cf := httpcli.NewExternalClientFactory(httpcli.NewLoggingMiddleware(sourcerLogger)) return internalrepos.NewSourcer(sourcerLogger, db, cf, internalrepos.WithDependenciesService(dependenciesService)) diff --git a/cmd/frontend/backend/inventory.go b/cmd/frontend/backend/inventory.go index e25067ead86..ab18547af71 100644 --- a/cmd/frontend/backend/inventory.go +++ b/cmd/frontend/backend/inventory.go @@ -40,7 +40,7 @@ func InventoryContext(logger log.Logger, repo api.RepoName, gsClient gitserver.C return info.OID().String() } - logger = logger.Scoped("InventoryContext", "returns the inventory context for computing the inventory for the repository at the given commit"). + logger = logger.Scoped("InventoryContext"). With(log.String("repo", string(repo)), log.String("commitID", string(commitID))) invCtx := inventory.Context{ ReadTree: func(ctx context.Context, path string) ([]fs.FileInfo, error) { diff --git a/cmd/frontend/backend/repos.go b/cmd/frontend/backend/repos.go index 77f5831f353..ce8b6349880 100644 --- a/cmd/frontend/backend/repos.go +++ b/cmd/frontend/backend/repos.go @@ -48,7 +48,7 @@ type ReposService interface { // more idiomatic solution. 
func NewRepos(logger log.Logger, db database.DB, client gitserver.Client) ReposService { repoStore := db.Repos() - logger = logger.Scoped("repos", "provides a repos store for the backend") + logger = logger.Scoped("repos") return &repos{ logger: logger, db: db, diff --git a/cmd/frontend/backend/user_emails.go b/cmd/frontend/backend/user_emails.go index 4be4fbe70c7..06f36a63b14 100644 --- a/cmd/frontend/backend/user_emails.go +++ b/cmd/frontend/backend/user_emails.go @@ -44,7 +44,7 @@ type UserEmailsService interface { func NewUserEmailsService(db database.DB, logger log.Logger) UserEmailsService { return &userEmails{ db: db, - logger: logger.Scoped("UserEmails", "user emails handling service"), + logger: logger.Scoped("UserEmails"), } } @@ -56,7 +56,7 @@ type userEmails struct { // Add adds an email address to a user. If email verification is required, it sends an email // verification email. func (e *userEmails) Add(ctx context.Context, userID int32, email string) error { - logger := e.logger.Scoped("Add", "handles addition of user emails") + logger := e.logger.Scoped("Add") // 🚨 SECURITY: Only the user and site admins can add an email address to a user. if err := auth.CheckSiteAdminOrSameUser(ctx, e.db, userID); err != nil { return err @@ -119,7 +119,7 @@ func (e *userEmails) Add(ctx context.Context, userID int32, email string) error // Remove removes the e-mail from the specified user. Perforce external accounts // using the e-mail will also be removed. func (e *userEmails) Remove(ctx context.Context, userID int32, email string) error { - logger := e.logger.Scoped("Remove", "handles removal of user emails"). + logger := e.logger.Scoped("Remove"). With(log.Int32("userID", userID)) // 🚨 SECURITY: Only the authenticated user and site admins can remove email @@ -166,7 +166,7 @@ func (e *userEmails) Remove(ctx context.Context, userID int32, email string) err // SetPrimaryEmail sets the supplied e-mail address as the primary address for // the given user. func (e *userEmails) SetPrimaryEmail(ctx context.Context, userID int32, email string) error { - logger := e.logger.Scoped("SetPrimaryEmail", "handles setting primary e-mail for user"). + logger := e.logger.Scoped("SetPrimaryEmail"). With(log.Int32("userID", userID)) // 🚨 SECURITY: Only the authenticated user and site admins can set the primary @@ -192,7 +192,7 @@ func (e *userEmails) SetPrimaryEmail(ctx context.Context, userID int32, email st // If verified is false, Perforce external accounts using the e-mail will be // removed. func (e *userEmails) SetVerified(ctx context.Context, userID int32, email string, verified bool) error { - logger := e.logger.Scoped("SetVerified", "handles setting e-mail as verified") + logger := e.logger.Scoped("SetVerified") // 🚨 SECURITY: Only site admins (NOT users themselves) can manually set email // verification status. 
Users themselves must go through the normal email diff --git a/cmd/frontend/codyapp/update_check_handler.go b/cmd/frontend/codyapp/update_check_handler.go index e42e7d335d1..b0b5ca2d2f0 100644 --- a/cmd/frontend/codyapp/update_check_handler.go +++ b/cmd/frontend/codyapp/update_check_handler.go @@ -50,7 +50,7 @@ type AppNoopUpdateChecker struct{} func NewAppUpdateChecker(logger log.Logger, resolver UpdateManifestResolver) *AppUpdateChecker { return &AppUpdateChecker{ - logger: logger.Scoped("app.update.checker", "Handler that handles sourcegraph app requests that check for updates"), + logger: logger.Scoped("app.update.checker"), manifestResolver: resolver, } } diff --git a/cmd/frontend/graphqlbackend/access_tokens.go b/cmd/frontend/graphqlbackend/access_tokens.go index d70e214e870..a3de2c70539 100644 --- a/cmd/frontend/graphqlbackend/access_tokens.go +++ b/cmd/frontend/graphqlbackend/access_tokens.go @@ -89,7 +89,7 @@ func (r *schemaResolver) CreateAccessToken(ctx context.Context, args *createAcce uid := actor.FromContext(ctx).UID id, token, err := r.db.AccessTokens().Create(ctx, userID, args.Scopes, args.Note, uid) - logger := r.logger.Scoped("CreateAccessToken", "access token creation"). + logger := r.logger.Scoped("CreateAccessToken"). With(log.Int32("userID", uid)) if conf.CanSendEmail() { @@ -178,7 +178,7 @@ func (r *schemaResolver) DeleteAccessToken(ctx context.Context, args *deleteAcce } - logger := r.logger.Scoped("DeleteAccessToken", "access token deletion"). + logger := r.logger.Scoped("DeleteAccessToken"). With(log.Int32("userID", token.SubjectUserID)) if conf.CanSendEmail() { diff --git a/cmd/frontend/graphqlbackend/external_service.go b/cmd/frontend/graphqlbackend/external_service.go index 85dbfd13d57..cd129706d88 100644 --- a/cmd/frontend/graphqlbackend/external_service.go +++ b/cmd/frontend/graphqlbackend/external_service.go @@ -83,7 +83,7 @@ func externalServiceByID(ctx context.Context, db database.DB, gqlID graphql.ID) return nil, err } - return &externalServiceResolver{logger: log.Scoped("externalServiceResolver", ""), db: db, externalService: es}, nil + return &externalServiceResolver{logger: log.Scoped("externalServiceResolver"), db: db, externalService: es}, nil } func MarshalExternalServiceID(id int64) graphql.ID { @@ -263,7 +263,7 @@ func (r *externalServiceResolver) CheckConnection(ctx context.Context) (*externa source, err := repos.NewSource( ctx, - log.Scoped("externalServiceResolver.CheckConnection", ""), + log.Scoped("externalServiceResolver.CheckConnection"), r.db, r.externalService, httpcli.ExternalClientFactory, @@ -485,7 +485,7 @@ func (r *externalServiceNamespaceConnectionResolver) compute(ctx context.Context return } - e := newExternalServices(log.Scoped("graphql.externalservicenamespaces", ""), r.db) + e := newExternalServices(log.Scoped("graphql.externalservicenamespaces"), r.db) r.nodes, r.err = e.ListNamespaces(ctx, externalServiceID, r.args.Kind, config) r.totalCount = int32(len(r.nodes)) }) @@ -549,7 +549,7 @@ func (r *externalServiceRepositoryConnectionResolver) compute(ctx context.Contex return } - e := newExternalServices(log.Scoped("graphql.externalservicerepositories", ""), r.db) + e := newExternalServices(log.Scoped("graphql.externalservicerepositories"), r.db) r.nodes, r.err = e.DiscoverRepos(ctx, externalServiceID, r.args.Kind, config, first, r.args.Query, r.args.ExcludeRepos) }) diff --git a/cmd/frontend/graphqlbackend/external_services.go b/cmd/frontend/graphqlbackend/external_services.go index 8980fcadcac..bc08c28f6e2 100644 --- 
a/cmd/frontend/graphqlbackend/external_services.go +++ b/cmd/frontend/graphqlbackend/external_services.go @@ -86,7 +86,7 @@ func (r *schemaResolver) AddExternalService(ctx context.Context, args *addExtern // Verify if the connection is functional, to render a warning message in the // editor if not. - res := &externalServiceResolver{logger: r.logger.Scoped("externalServiceResolver", ""), db: r.db, externalService: externalService} + res := &externalServiceResolver{logger: r.logger.Scoped("externalServiceResolver"), db: r.db, externalService: externalService} if err = newExternalServices(r.logger, r.db).ValidateConnection(ctx, externalService); err != nil { res.warning = fmt.Sprintf("External service created, but we encountered a problem while validating the external service: %s", err) } @@ -166,7 +166,7 @@ func (r *schemaResolver) UpdateExternalService(ctx context.Context, args *update r.logger.Warn("Failed to trigger external service sync") } - res := &externalServiceResolver{logger: r.logger.Scoped("externalServiceResolver", ""), db: r.db, externalService: es} + res := &externalServiceResolver{logger: r.logger.Scoped("externalServiceResolver"), db: r.db, externalService: es} if oldConfig != newConfig { // Verify if the connection is functional, to render a warning message in the @@ -327,7 +327,7 @@ func (r *externalServiceConnectionResolver) Nodes(ctx context.Context) ([]*exter } resolvers := make([]*externalServiceResolver, 0, len(externalServices)) for _, externalService := range externalServices { - resolvers = append(resolvers, &externalServiceResolver{logger: log.Scoped("externalServiceResolver", ""), db: r.db, externalService: externalService}) + resolvers = append(resolvers, &externalServiceResolver{logger: log.Scoped("externalServiceResolver"), db: r.db, externalService: externalService}) } return resolvers, nil } @@ -392,7 +392,7 @@ func (r *ComputedExternalServiceConnectionResolver) Nodes(_ context.Context) []* } resolvers := make([]*externalServiceResolver, 0, len(svcs)) for _, svc := range svcs { - resolvers = append(resolvers, &externalServiceResolver{logger: log.Scoped("externalServiceResolver", ""), db: r.db, externalService: svc}) + resolvers = append(resolvers, &externalServiceResolver{logger: log.Scoped("externalServiceResolver"), db: r.db, externalService: svc}) } return resolvers } diff --git a/cmd/frontend/graphqlbackend/git_commit.go b/cmd/frontend/graphqlbackend/git_commit.go index 424a6c67c31..fbddfb30ad3 100644 --- a/cmd/frontend/graphqlbackend/git_commit.go +++ b/cmd/frontend/graphqlbackend/git_commit.go @@ -71,7 +71,7 @@ type GitCommitResolver struct { func NewGitCommitResolver(db database.DB, gsClient gitserver.Client, repo *RepositoryResolver, id api.CommitID, commit *gitdomain.Commit) *GitCommitResolver { repoName := repo.RepoName() return &GitCommitResolver{ - logger: log.Scoped("gitCommitResolver", "resolve a specific commit").With( + logger: log.Scoped("gitCommitResolver").With( log.String("repo", string(repoName)), log.String("commitID", string(id)), ), diff --git a/cmd/frontend/graphqlbackend/graphqlbackend.go b/cmd/frontend/graphqlbackend/graphqlbackend.go index 284772e4440..00401bd2425 100644 --- a/cmd/frontend/graphqlbackend/graphqlbackend.go +++ b/cmd/frontend/graphqlbackend/graphqlbackend.go @@ -629,7 +629,7 @@ func NewSchema( } } - logger := log.Scoped("GraphQL", "general GraphQL logging") + logger := log.Scoped("GraphQL") opts := []graphql.SchemaOpt{ graphql.Tracer(&requestTracer{ DB: db, @@ -694,7 +694,7 @@ type OptionalResolver struct { // 
defaults. It does not implement any sub-resolvers. func newSchemaResolver(db database.DB, gitserverClient gitserver.Client) *schemaResolver { r := &schemaResolver{ - logger: log.Scoped("schemaResolver", "GraphQL schema resolver"), + logger: log.Scoped("schemaResolver"), db: db, gitserverClient: gitserverClient, repoupdaterClient: repoupdater.DefaultClient, diff --git a/cmd/frontend/graphqlbackend/perforce_changelist.go b/cmd/frontend/graphqlbackend/perforce_changelist.go index 191236b6397..ded1d4d82c1 100644 --- a/cmd/frontend/graphqlbackend/perforce_changelist.go +++ b/cmd/frontend/graphqlbackend/perforce_changelist.go @@ -46,7 +46,7 @@ func newPerforceChangelistResolver(r *RepositoryResolver, changelistID, commitSH canonicalURL := filepath.Join(repoURL.Path, "-", "changelist", changelistID) return &PerforceChangelistResolver{ - logger: r.logger.Scoped("PerforceChangelistResolver", "resolve a specific changelist"), + logger: r.logger.Scoped("PerforceChangelistResolver"), repositoryResolver: r, cid: changelistID, commitSHA: commitSHA, diff --git a/cmd/frontend/graphqlbackend/repositories.go b/cmd/frontend/graphqlbackend/repositories.go index db2a8c2ebc6..d76a9e2a662 100644 --- a/cmd/frontend/graphqlbackend/repositories.go +++ b/cmd/frontend/graphqlbackend/repositories.go @@ -112,7 +112,7 @@ func (r *schemaResolver) Repositories(ctx context.Context, args *repositoryArgs) connectionStore := &repositoriesConnectionStore{ ctx: ctx, db: r.db, - logger: r.logger.Scoped("repositoryConnectionResolver", "resolves connections to a repository"), + logger: r.logger.Scoped("repositoryConnectionResolver"), opt: opt, } diff --git a/cmd/frontend/graphqlbackend/repository.go b/cmd/frontend/graphqlbackend/repository.go index 552401ced12..a08a7c81e37 100644 --- a/cmd/frontend/graphqlbackend/repository.go +++ b/cmd/frontend/graphqlbackend/repository.go @@ -75,7 +75,7 @@ func NewRepositoryResolver(db database.DB, client gitserver.Client, repo *types. Name: name, ID: id, }, - logger: log.Scoped("repositoryResolver", "resolve a specific repository"). + logger: log.Scoped("repositoryResolver"). With(log.Object("repo", log.String("name", string(name)), log.Int32("id", int32(id)))), diff --git a/cmd/frontend/graphqlbackend/search.go b/cmd/frontend/graphqlbackend/search.go index 897c4fa71bd..7ff4d57998b 100644 --- a/cmd/frontend/graphqlbackend/search.go +++ b/cmd/frontend/graphqlbackend/search.go @@ -43,7 +43,7 @@ func NewBatchSearchImplementer(ctx context.Context, logger log.Logger, db databa } return &searchResolver{ - logger: logger.Scoped("BatchSearchSearchImplementer", "provides search results and suggestions"), + logger: logger.Scoped("BatchSearchSearchImplementer"), client: cli, db: db, SearchInputs: inputs, diff --git a/cmd/frontend/graphqlbackend/search_results.go b/cmd/frontend/graphqlbackend/search_results.go index bc2007b54bd..07a285ccbbc 100644 --- a/cmd/frontend/graphqlbackend/search_results.go +++ b/cmd/frontend/graphqlbackend/search_results.go @@ -443,7 +443,7 @@ func (r *searchResolver) Stats(ctx context.Context) (stats *searchResultsStats, if err := json.Unmarshal(jsonRes, &stats); err != nil { return nil, err } - stats.logger = r.logger.Scoped("searchResultsStats", "provides status on search results") + stats.logger = r.logger.Scoped("searchResultsStats") stats.sr = r return stats, nil } @@ -504,7 +504,7 @@ func (r *searchResolver) Stats(ctx context.Context) (stats *searchResultsStats, return nil, err // sparkline generation failed, so don't cache. 
} stats = &searchResultsStats{ - logger: r.logger.Scoped("searchResultsStats", "provides status on search results"), + logger: r.logger.Scoped("searchResultsStats"), JApproximateResultCount: v.ApproximateResultCount(), JSparkline: sparkline, sr: r, diff --git a/cmd/frontend/graphqlbackend/search_results_stats_languages.go b/cmd/frontend/graphqlbackend/search_results_stats_languages.go index 1749917a124..8d7d45d03b9 100644 --- a/cmd/frontend/graphqlbackend/search_results_stats_languages.go +++ b/cmd/frontend/graphqlbackend/search_results_stats_languages.go @@ -27,7 +27,7 @@ func (srs *searchResultsStats) Languages(ctx context.Context) ([]*languageStatis return nil, err } - logger := srs.logger.Scoped("languages", "provide stats on langauges from the search results") + logger := srs.logger.Scoped("languages") langs, err := searchResultsStatsLanguages(ctx, logger, srs.sr.db, gitserver.NewClient(), matches) if err != nil { return nil, err diff --git a/cmd/frontend/graphqlbackend/send_test_email.go b/cmd/frontend/graphqlbackend/send_test_email.go index 35f247ebece..a61cf8ec79c 100644 --- a/cmd/frontend/graphqlbackend/send_test_email.go +++ b/cmd/frontend/graphqlbackend/send_test_email.go @@ -18,7 +18,7 @@ func (r *schemaResolver) SendTestEmail(ctx context.Context, args struct{ To stri return "", err } - logger := r.logger.Scoped("SendTestEmail", "email send test") + logger := r.logger.Scoped("SendTestEmail") // Generate a simple identifier to make each email unique (don't need the full ID) var testID string diff --git a/cmd/frontend/graphqlbackend/settings_cascade.go b/cmd/frontend/graphqlbackend/settings_cascade.go index 478e6e92c5e..0aaabc15649 100644 --- a/cmd/frontend/graphqlbackend/settings_cascade.go +++ b/cmd/frontend/graphqlbackend/settings_cascade.go @@ -30,7 +30,7 @@ func (r *settingsCascade) Subjects(ctx context.Context) ([]*settingsSubjectResol return nil, err } - return resolversForSubjects(ctx, log.Scoped("settings", "subjects"), r.db, subjects) + return resolversForSubjects(ctx, log.Scoped("settings"), r.db, subjects) } func (r *settingsCascade) Final(ctx context.Context) (string, error) { @@ -62,7 +62,7 @@ func (r *schemaResolver) ViewerSettings(ctx context.Context) (*settingsCascade, return nil, err } if user == nil { - return &settingsCascade{db: r.db, subject: &settingsSubjectResolver{site: NewSiteResolver(log.Scoped("settings", "ViewerSettings"), r.db)}}, nil + return &settingsCascade{db: r.db, subject: &settingsSubjectResolver{site: NewSiteResolver(log.Scoped("settings"), r.db)}}, nil } return &settingsCascade{db: r.db, subject: &settingsSubjectResolver{user: user}}, nil } diff --git a/cmd/frontend/graphqlbackend/site.go b/cmd/frontend/graphqlbackend/site.go index a81f64c118b..e2501704529 100644 --- a/cmd/frontend/graphqlbackend/site.go +++ b/cmd/frontend/graphqlbackend/site.go @@ -377,7 +377,7 @@ func (r *siteResolver) UpgradeReadiness(ctx context.Context) (*upgradeReadinessR } return &upgradeReadinessResolver{ - logger: r.logger.Scoped("upgradeReadiness", ""), + logger: r.logger.Scoped("upgradeReadiness"), db: r.db, }, nil } diff --git a/cmd/frontend/graphqlbackend/site_admin.go b/cmd/frontend/graphqlbackend/site_admin.go index 27094ba83ee..7570cd1745a 100644 --- a/cmd/frontend/graphqlbackend/site_admin.go +++ b/cmd/frontend/graphqlbackend/site_admin.go @@ -102,7 +102,7 @@ func (r *schemaResolver) DeleteUsers(ctx context.Context, args *struct { ids[index] = id } - logger := r.logger.Scoped("DeleteUsers", "delete users mutation"). 
+ logger := r.logger.Scoped("DeleteUsers"). With(log.Int32s("users", ids)) // Collect username, verified email addresses, and external accounts to be used diff --git a/cmd/frontend/graphqlbackend/status_messages.go b/cmd/frontend/graphqlbackend/status_messages.go index 510852ca927..6cef7cd8af2 100644 --- a/cmd/frontend/graphqlbackend/status_messages.go +++ b/cmd/frontend/graphqlbackend/status_messages.go @@ -95,7 +95,7 @@ func (r *statusMessageResolver) ExternalService(ctx context.Context) (*externalS return nil, err } - return &externalServiceResolver{logger: log.Scoped("externalServiceResolver", ""), db: r.db, externalService: externalService}, nil + return &externalServiceResolver{logger: log.Scoped("externalServiceResolver"), db: r.db, externalService: externalService}, nil } type indexingProgressMessageResolver struct { diff --git a/cmd/frontend/graphqlbackend/user.go b/cmd/frontend/graphqlbackend/user.go index b66df49a934..e26c8bc94ac 100644 --- a/cmd/frontend/graphqlbackend/user.go +++ b/cmd/frontend/graphqlbackend/user.go @@ -76,7 +76,7 @@ func NewUserResolver(ctx context.Context, db database.DB, user *types.User) *Use return &UserResolver{ db: db, user: user, - logger: log.Scoped("userResolver", "resolves a specific user").With(log.String("user", user.Username)), + logger: log.Scoped("userResolver").With(log.String("user", user.Username)), actor: actor.FromContext(ctx), } } @@ -86,7 +86,7 @@ func newUserResolverFromActor(a *actor.Actor, db database.DB, user *types.User) return &UserResolver{ db: db, user: user, - logger: log.Scoped("userResolver", "resolves a specific user").With(log.String("user", user.Username)), + logger: log.Scoped("userResolver").With(log.String("user", user.Username)), actor: a, } } @@ -399,7 +399,7 @@ func (r *schemaResolver) UpdatePassword(ctx context.Context, args *struct { return nil, err } - logger := r.logger.Scoped("UpdatePassword", "password update"). + logger := r.logger.Scoped("UpdatePassword"). With(log.Int32("userID", user.ID)) if conf.CanSendEmail() { @@ -427,7 +427,7 @@ func (r *schemaResolver) CreatePassword(ctx context.Context, args *struct { return nil, err } - logger := r.logger.Scoped("CreatePassword", "password creation"). + logger := r.logger.Scoped("CreatePassword"). With(log.Int32("userID", user.ID)) if conf.CanSendEmail() { diff --git a/cmd/frontend/graphqlbackend/user_emails.go b/cmd/frontend/graphqlbackend/user_emails.go index 93f4c83f8cb..c1f461990c9 100644 --- a/cmd/frontend/graphqlbackend/user_emails.go +++ b/cmd/frontend/graphqlbackend/user_emails.go @@ -198,7 +198,7 @@ func (r *schemaResolver) AddUserEmail(ctx context.Context, args *addUserEmailArg return nil, err } - logger := r.logger.Scoped("AddUserEmail", "adding email to user"). + logger := r.logger.Scoped("AddUserEmail"). 
With(log.Int32("userID", userID)) userEmails := backend.NewUserEmailsService(r.db, logger) diff --git a/cmd/frontend/graphqlbackend/users_create.go b/cmd/frontend/graphqlbackend/users_create.go index a652ed10802..a650d25679c 100644 --- a/cmd/frontend/graphqlbackend/users_create.go +++ b/cmd/frontend/graphqlbackend/users_create.go @@ -46,7 +46,7 @@ func (r *schemaResolver) CreateUser(ctx context.Context, args *struct { needsEmailVerification = false } - logger := r.logger.Scoped("createUser", "create user handler").With( + logger := r.logger.Scoped("createUser").With( log.Bool("needsEmailVerification", needsEmailVerification)) var emailVerificationCode string diff --git a/cmd/frontend/graphqlbackend/users_randomize_password.go b/cmd/frontend/graphqlbackend/users_randomize_password.go index 24ae1e88139..0f7dfa5cec6 100644 --- a/cmd/frontend/graphqlbackend/users_randomize_password.go +++ b/cmd/frontend/graphqlbackend/users_randomize_password.go @@ -81,7 +81,7 @@ func (r *schemaResolver) RandomizeUserPassword(ctx context.Context, args *struct return nil, errors.Wrap(err, "cannot parse user ID") } - logger := r.logger.Scoped("randomizeUserPassword", "endpoint for resetting user passwords"). + logger := r.logger.Scoped("randomizeUserPassword"). With(log.Int32("userID", userID)) logger.Info("resetting user password") diff --git a/cmd/frontend/graphqlbackend/webhook_logs.go b/cmd/frontend/graphqlbackend/webhook_logs.go index 0ad18f064d6..4bd3489f6a4 100644 --- a/cmd/frontend/graphqlbackend/webhook_logs.go +++ b/cmd/frontend/graphqlbackend/webhook_logs.go @@ -143,7 +143,7 @@ func NewWebhookLogConnectionResolver( } return &WebhookLogConnectionResolver{ - logger: log.Scoped("webhookLogConnectionResolver", ""), + logger: log.Scoped("webhookLogConnectionResolver"), args: args, externalServiceID: externalServiceID, store: db.WebhookLogs(keyring.Default().WebhookLogKey), diff --git a/cmd/frontend/internal/app/app.go b/cmd/frontend/internal/app/app.go index 4f38e2cdfd2..217017d8af0 100644 --- a/cmd/frontend/internal/app/app.go +++ b/cmd/frontend/internal/app/app.go @@ -36,7 +36,7 @@ func NewHandler(db database.DB, logger log.Logger, githubAppSetupHandler http.Ha return globals.ExternalURL().Scheme == "https" })) - logger = logger.Scoped("appHandler", "handles routes for all app related requests") + logger = logger.Scoped("appHandler") r := router.Router() diff --git a/cmd/frontend/internal/app/debug.go b/cmd/frontend/internal/app/debug.go index 56e0e11e8f3..90d1fc96562 100644 --- a/cmd/frontend/internal/app/debug.go +++ b/cmd/frontend/internal/app/debug.go @@ -145,7 +145,7 @@ func addGrafana(r *mux.Router, db database.DB) { // The route only forwards known project ids, so a DSN must be defined in siteconfig.Log.Sentry.Dsn // to allow events to be forwarded. Sentry responses are ignored. func addSentry(r *mux.Router) { - logger := sglog.Scoped("sentryTunnel", "A Sentry.io specific HTTP route that allows to forward client-side reports, https://docs.sentry.io/platforms/javascript/troubleshooting/#dealing-with-ad-blockers") + logger := sglog.Scoped("sentryTunnel") // Helper to fetch Sentry configuration from siteConfig. getConfig := func() (string, string, error) { @@ -285,7 +285,7 @@ func addOpenTelemetryProtocolAdapter(r *mux.Router) { ctx = context.Background() endpoint = otlpenv.GetEndpoint() protocol = otlpenv.GetProtocol() - logger = sglog.Scoped("otlpAdapter", "OpenTelemetry protocol adapter and forwarder"). + logger = sglog.Scoped("otlpAdapter"). 
With(sglog.String("endpoint", endpoint), sglog.String("protocol", string(protocol))) ) diff --git a/cmd/frontend/internal/app/editor.go b/cmd/frontend/internal/app/editor.go index 7d71d8af24f..dd7156319ce 100644 --- a/cmd/frontend/internal/app/editor.go +++ b/cmd/frontend/internal/app/editor.go @@ -210,7 +210,7 @@ func parseEditorRequest(db database.DB, q url.Values) (*editorRequest, error) { v := &editorRequest{ db: db, - logger: log.Scoped("editor", "requests from editors."), + logger: log.Scoped("editor"), } if search := q.Get("search"); search != "" { diff --git a/cmd/frontend/internal/app/jscontext/jscontext.go b/cmd/frontend/internal/app/jscontext/jscontext.go index 33fb94fcc5b..3df444547d9 100644 --- a/cmd/frontend/internal/app/jscontext/jscontext.go +++ b/cmd/frontend/internal/app/jscontext/jscontext.go @@ -305,7 +305,7 @@ func NewJSContextFromRequest(req *http.Request, db database.DB) JSContext { } } - siteResolver := graphqlbackend.NewSiteResolver(logger.Scoped("jscontext", "constructing jscontext"), db) + siteResolver := graphqlbackend.NewSiteResolver(logger.Scoped("jscontext"), db) needsRepositoryConfiguration, err := siteResolver.NeedsRepositoryConfiguration(ctx) if err != nil { needsRepositoryConfiguration = false diff --git a/cmd/frontend/internal/app/otlpadapter/adapter.go b/cmd/frontend/internal/app/otlpadapter/adapter.go index e91a6f14405..59001fa1c1c 100644 --- a/cmd/frontend/internal/app/otlpadapter/adapter.go +++ b/cmd/frontend/internal/app/otlpadapter/adapter.go @@ -52,7 +52,7 @@ type adaptedSignal struct { // Register attaches a route to the router that adapts requests on the `/otlp` path. func (sig *adaptedSignal) Register(ctx context.Context, logger log.Logger, r *mux.Router, receiverURL *url.URL) { - adapterLogger := logger.Scoped(path.Base(sig.PathPrefix), "OpenTelemetry signal-specific tunnel") + adapterLogger := logger.Scoped(path.Base(sig.PathPrefix)) // Set up an http/json -> ${configured_protocol} adapter adapter, err := sig.CreateAdapter() diff --git a/cmd/frontend/internal/app/resolvers/app.go b/cmd/frontend/internal/app/resolvers/app.go index 81a4f82aa40..7b896871b8a 100644 --- a/cmd/frontend/internal/app/resolvers/app.go +++ b/cmd/frontend/internal/app/resolvers/app.go @@ -316,7 +316,7 @@ func (r localExternalServiceResolver) Repositories(ctx context.Context) ([]graph }) } case *schema.LocalGitExternalService: - src, err := repos.NewLocalGitSource(ctx, log.Scoped("localExternalServiceResolver.Repositories", ""), r.service) + src, err := repos.NewLocalGitSource(ctx, log.Scoped("localExternalServiceResolver.Repositories"), r.service) if err != nil { return nil, err } diff --git a/cmd/frontend/internal/app/sign_out.go b/cmd/frontend/internal/app/sign_out.go index 0dd77f32dba..02babc31e1f 100644 --- a/cmd/frontend/internal/app/sign_out.go +++ b/cmd/frontend/internal/app/sign_out.go @@ -36,7 +36,7 @@ func RegisterSSOSignOutHandler(f func(w http.ResponseWriter, r *http.Request)) { } func serveSignOutHandler(logger log.Logger, db database.DB) http.HandlerFunc { - logger = logger.Scoped("signOut", "signout handler") + logger = logger.Scoped("signOut") recorder := telemetryrecorder.NewBestEffort(logger, db) return func(w http.ResponseWriter, r *http.Request) { diff --git a/cmd/frontend/internal/app/ui/handlers.go b/cmd/frontend/internal/app/ui/handlers.go index d15f255da26..12b47d95a2b 100644 --- a/cmd/frontend/internal/app/ui/handlers.go +++ b/cmd/frontend/internal/app/ui/handlers.go @@ -138,7 +138,7 @@ var mockNewCommon func(w http.ResponseWriter, r 
*http.Request, title string, ser // In the case of a repository that is cloning, a Common data structure is // returned but it has a nil Repo. func newCommon(w http.ResponseWriter, r *http.Request, db database.DB, title string, indexed bool, serveError serveErrorHandler) (*Common, error) { - logger := log.Scoped("commonHandler", "") + logger := log.Scoped("commonHandler") if mockNewCommon != nil { return mockNewCommon(w, r, title, serveError) } diff --git a/cmd/frontend/internal/app/ui/help.go b/cmd/frontend/internal/app/ui/help.go index 14ae7567fe0..6247833ab3d 100644 --- a/cmd/frontend/internal/app/ui/help.go +++ b/cmd/frontend/internal/app/ui/help.go @@ -24,7 +24,7 @@ func serveHelp(w http.ResponseWriter, r *http.Request) { page := strings.TrimPrefix(r.URL.Path, "/help") versionStr := version.Version() - logger := sglog.Scoped("serveHelp", "") + logger := sglog.Scoped("serveHelp") logger.Info("redirecting to docs", sglog.String("page", page), sglog.String("versionStr", versionStr)) // For Cody App, help links are handled in the frontend. We should never get here. diff --git a/cmd/frontend/internal/app/ui/landing.go b/cmd/frontend/internal/app/ui/landing.go index 9b1f5ceeb98..c24ea522ad4 100644 --- a/cmd/frontend/internal/app/ui/landing.go +++ b/cmd/frontend/internal/app/ui/landing.go @@ -23,7 +23,7 @@ var goSymbolReg = lazyregexp.New("/info/GoPackage/(.+)$") // serveRepoLanding simply redirects the old (sourcegraph.com//-/info) repo landing page // URLs directly to the repo itself (sourcegraph.com/). func serveRepoLanding(db database.DB) func(http.ResponseWriter, *http.Request) error { - logger := log.Scoped("serveRepoLanding", "redirects the old (sourcegraph.com//-/info) repo landing page") + logger := log.Scoped("serveRepoLanding") return func(w http.ResponseWriter, r *http.Request) error { legacyRepoLandingCounter.Inc() diff --git a/cmd/frontend/internal/app/ui/router.go b/cmd/frontend/internal/app/ui/router.go index 9487bc45d0d..fe1a18e91a1 100644 --- a/cmd/frontend/internal/app/ui/router.go +++ b/cmd/frontend/internal/app/ui/router.go @@ -245,7 +245,7 @@ func brandNameSubtitle(titles ...string) string { } func initRouter(db database.DB, router *mux.Router) { - logger := log.Scoped("router", "") + logger := log.Scoped("router") uirouter.Router = router // make accessible to other packages @@ -505,7 +505,7 @@ func serveErrorNoDebug(w http.ResponseWriter, r *http.Request, db database.DB, e w.WriteHeader(statusCode) errorID := randstring.NewLen(6) - logger := log.Scoped("ui", "logger for serveErrorNoDebug") + logger := log.Scoped("ui") // Determine trace URL and log the error. 
var traceURL string diff --git a/cmd/frontend/internal/app/verify_email.go b/cmd/frontend/internal/app/verify_email.go index dd938d1bc8c..e9e7308ff29 100644 --- a/cmd/frontend/internal/app/verify_email.go +++ b/cmd/frontend/internal/app/verify_email.go @@ -18,7 +18,7 @@ import ( func serveVerifyEmail(db database.DB) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - logger := log.Scoped("verify-email", "") + logger := log.Scoped("verify-email") email := r.URL.Query().Get("email") verifyCode := r.URL.Query().Get("code") actr := actor.FromContext(ctx) diff --git a/cmd/frontend/internal/auth/azureoauth/provider.go b/cmd/frontend/internal/auth/azureoauth/provider.go index 007091fa6b2..d86c20aa0c8 100644 --- a/cmd/frontend/internal/auth/azureoauth/provider.go +++ b/cmd/frontend/internal/auth/azureoauth/provider.go @@ -31,7 +31,7 @@ const ( func Init(logger log.Logger, db database.DB) { const pkgName = "azureoauth" - logger = logger.Scoped(pkgName, "Azure DevOps OAuth config watch") + logger = logger.Scoped(pkgName) conf.ContributeValidator(func(cfg conftypes.SiteConfigQuerier) conf.Problems { _, problems := parseConfig(logger, cfg, db) return problems diff --git a/cmd/frontend/internal/auth/bitbucketcloudoauth/config.go b/cmd/frontend/internal/auth/bitbucketcloudoauth/config.go index af3bb3f373d..362d1d91da6 100644 --- a/cmd/frontend/internal/auth/bitbucketcloudoauth/config.go +++ b/cmd/frontend/internal/auth/bitbucketcloudoauth/config.go @@ -17,7 +17,7 @@ import ( func Init(logger log.Logger, db database.DB) { const pkgName = "bitbucketcloudoauth" - logger = logger.Scoped(pkgName, "Bitbucket Cloud OAuth config watch") + logger = logger.Scoped(pkgName) conf.ContributeValidator(func(cfg conftypes.SiteConfigQuerier) conf.Problems { _, problems := parseConfig(logger, cfg, db) return problems diff --git a/cmd/frontend/internal/auth/githubappauth/init.go b/cmd/frontend/internal/auth/githubappauth/init.go index 57619950520..8498d21f217 100644 --- a/cmd/frontend/internal/auth/githubappauth/init.go +++ b/cmd/frontend/internal/auth/githubappauth/init.go @@ -20,6 +20,6 @@ func Init( _ conftypes.UnifiedWatchable, enterpriseServices *enterprise.Services, ) error { - enterpriseServices.GitHubAppsResolver = NewResolver(log.Scoped("GitHubAppsResolver", ""), db) + enterpriseServices.GitHubAppsResolver = NewResolver(log.Scoped("GitHubAppsResolver"), db) return nil } diff --git a/cmd/frontend/internal/auth/githuboauth/config.go b/cmd/frontend/internal/auth/githuboauth/config.go index 935764d218e..a2ed1b0ae4c 100644 --- a/cmd/frontend/internal/auth/githuboauth/config.go +++ b/cmd/frontend/internal/auth/githuboauth/config.go @@ -17,7 +17,7 @@ import ( func Init(logger log.Logger, db database.DB) { const pkgName = "githuboauth" - logger = logger.Scoped(pkgName, "GitHub OAuth config watch") + logger = logger.Scoped(pkgName) conf.ContributeValidator(func(cfg conftypes.SiteConfigQuerier) conf.Problems { _, problems := parseConfig(logger, cfg, db) return problems diff --git a/cmd/frontend/internal/auth/githuboauth/session.go b/cmd/frontend/internal/auth/githuboauth/session.go index 5ff9407bcea..6225448e640 100644 --- a/cmd/frontend/internal/auth/githuboauth/session.go +++ b/cmd/frontend/internal/auth/githuboauth/session.go @@ -194,7 +194,7 @@ func derefInt64(i *int64) int64 { func (s *sessionIssuerHelper) newClient(token string) *githubsvc.V3Client { apiURL, _ := githubsvc.APIRoot(s.BaseURL) - return githubsvc.NewV3Client(log.Scoped("session.github.v3", "github v3 client for 
session issuer"), + return githubsvc.NewV3Client(log.Scoped("session.github.v3"), extsvc.URNGitHubOAuth, apiURL, &esauth.OAuthBearerToken{Token: token}, nil) } diff --git a/cmd/frontend/internal/auth/gitlaboauth/config.go b/cmd/frontend/internal/auth/gitlaboauth/config.go index ba3af73b4ed..2532d63a759 100644 --- a/cmd/frontend/internal/auth/gitlaboauth/config.go +++ b/cmd/frontend/internal/auth/gitlaboauth/config.go @@ -17,7 +17,7 @@ import ( func Init(logger log.Logger, db database.DB) { const pkgName = "gitlaboauth" - logger = log.Scoped(pkgName, "GitLab OAuth config watch") + logger = log.Scoped(pkgName) conf.ContributeValidator(func(cfg conftypes.SiteConfigQuerier) conf.Problems { _, problems := parseConfig(logger, cfg, db) diff --git a/cmd/frontend/internal/auth/gitlaboauth/login.go b/cmd/frontend/internal/auth/gitlaboauth/login.go index 0a5d5feb2d1..4076f4c9c94 100644 --- a/cmd/frontend/internal/auth/gitlaboauth/login.go +++ b/cmd/frontend/internal/auth/gitlaboauth/login.go @@ -69,7 +69,7 @@ func CallbackHandler(config *oauth2.Config, success, failure http.Handler) http. } func gitlabHandler(config *oauth2.Config, success, failure http.Handler) http.Handler { - logger := log.Scoped("GitlabOAuthHandler", "Gitlab OAuth Handler") + logger := log.Scoped("GitlabOAuthHandler") if failure == nil { failure = gologin.DefaultFailureHandler diff --git a/cmd/frontend/internal/auth/httpheader/config.go b/cmd/frontend/internal/auth/httpheader/config.go index a7bd588ee37..b2d22e9df8d 100644 --- a/cmd/frontend/internal/auth/httpheader/config.go +++ b/cmd/frontend/internal/auth/httpheader/config.go @@ -30,7 +30,7 @@ const pkgName = "httpheader" func Init() { conf.ContributeValidator(validateConfig) - logger := log.Scoped(pkgName, "HTTP header authentication config watch") + logger := log.Scoped(pkgName) go func() { conf.Watch(func() { newPC, _ := getProviderConfig() diff --git a/cmd/frontend/internal/auth/init.go b/cmd/frontend/internal/auth/init.go index 42654ee2a7e..7a772513357 100644 --- a/cmd/frontend/internal/auth/init.go +++ b/cmd/frontend/internal/auth/init.go @@ -33,7 +33,7 @@ import ( // Init must be called by the frontend to initialize the auth middlewares. 
func Init(logger log.Logger, db database.DB) { - logger = logger.Scoped("auth", "provides enterprise authentication middleware") + logger = logger.Scoped("auth") azureoauth.Init(logger, db) bitbucketcloudoauth.Init(logger, db) gerrit.Init() @@ -118,7 +118,7 @@ func Init(logger log.Logger, db database.DB) { } func ssoSignOutHandler(w http.ResponseWriter, r *http.Request) { - logger := log.Scoped("ssoSignOutHandler", "Signing out from SSO providers") + logger := log.Scoped("ssoSignOutHandler") for _, p := range conf.Get().AuthProviders { var err error switch { diff --git a/cmd/frontend/internal/auth/oauth/middleware.go b/cmd/frontend/internal/auth/oauth/middleware.go index 2b1dff81b24..6b7428797d1 100644 --- a/cmd/frontend/internal/auth/oauth/middleware.go +++ b/cmd/frontend/internal/auth/oauth/middleware.go @@ -116,7 +116,7 @@ func withOAuthExternalClient(r *http.Request) *http.Request { if traceLogEnabled { loggingClient := *client loggingClient.Transport = &loggingRoundTripper{ - log: log.Scoped("oauth_external.transport", "transport logger for withOAuthExternalClient"), + log: log.Scoped("oauth_external.transport"), underlying: client.Transport, } client = &loggingClient diff --git a/cmd/frontend/internal/auth/openidconnect/config.go b/cmd/frontend/internal/auth/openidconnect/config.go index e8f42993d68..0904b0079a5 100644 --- a/cmd/frontend/internal/auth/openidconnect/config.go +++ b/cmd/frontend/internal/auth/openidconnect/config.go @@ -66,7 +66,7 @@ func Init() { conf.ContributeValidator(validateConfig) const pkgName = "openidconnect" - logger := log.Scoped(pkgName, "OpenID Connect config watch") + logger := log.Scoped(pkgName) go func() { conf.Watch(func() { ps := getProviders() diff --git a/cmd/frontend/internal/auth/saml/config.go b/cmd/frontend/internal/auth/saml/config.go index 2493eae7087..fbe89542560 100644 --- a/cmd/frontend/internal/auth/saml/config.go +++ b/cmd/frontend/internal/auth/saml/config.go @@ -68,7 +68,7 @@ func Init() { conf.ContributeValidator(validateConfig) const pkgName = "saml" - logger := log.Scoped(pkgName, "SAML config watch") + logger := log.Scoped(pkgName) go func() { conf.Watch(func() { ps := getProviders() diff --git a/cmd/frontend/internal/auth/sourcegraphoperator/config.go b/cmd/frontend/internal/auth/sourcegraphoperator/config.go index 90832db9d71..d873860cd23 100644 --- a/cmd/frontend/internal/auth/sourcegraphoperator/config.go +++ b/cmd/frontend/internal/auth/sourcegraphoperator/config.go @@ -40,7 +40,7 @@ func Init() { conf.ContributeValidator(validateConfig) p := NewProvider(*cloudSiteConfig.AuthProviders.SourcegraphOperator) - logger := log.Scoped(auth.SourcegraphOperatorProviderType, "Sourcegraph Operator config watch") + logger := log.Scoped(auth.SourcegraphOperatorProviderType) go func() { if err := p.Refresh(context.Background()); err != nil { logger.Error("failed to fetch Sourcegraph Operator service provider metadata", log.Error(err)) diff --git a/cmd/frontend/internal/auth/sourcegraphoperator/middleware.go b/cmd/frontend/internal/auth/sourcegraphoperator/middleware.go index b5d6480f91e..d195dd7cc66 100644 --- a/cmd/frontend/internal/auth/sourcegraphoperator/middleware.go +++ b/cmd/frontend/internal/auth/sourcegraphoperator/middleware.go @@ -59,7 +59,7 @@ const ( ) func authHandler(db database.DB) func(w http.ResponseWriter, r *http.Request) { - logger := log.Scoped(internalauth.SourcegraphOperatorProviderType+".authHandler", "Sourcegraph Operator authentication handler") + logger := log.Scoped(internalauth.SourcegraphOperatorProviderType 
+ ".authHandler") return func(w http.ResponseWriter, r *http.Request) { switch strings.TrimPrefix(r.URL.Path, authPrefix) { case "/login": // Endpoint that starts the Authentication Request Code Flow. diff --git a/cmd/frontend/internal/authz/init.go b/cmd/frontend/internal/authz/init.go index 6c2290566a3..aebec0c4821 100644 --- a/cmd/frontend/internal/authz/init.go +++ b/cmd/frontend/internal/authz/init.go @@ -67,7 +67,7 @@ func Init( return problems }) - enterpriseServices.PermissionsGitHubWebhook = webhooks.NewGitHubWebhook(log.Scoped("PermissionsGitHubWebhook", "permissions sync webhook handler for GitHub webhooks")) + enterpriseServices.PermissionsGitHubWebhook = webhooks.NewGitHubWebhook(log.Scoped("PermissionsGitHubWebhook")) authz.DefaultSubRepoPermsChecker = srp.NewSubRepoPermsClient(db.SubRepoPerms()) diff --git a/cmd/frontend/internal/authz/resolvers/resolver.go b/cmd/frontend/internal/authz/resolvers/resolver.go index d684a7f3a3c..87f13b7d182 100644 --- a/cmd/frontend/internal/authz/resolvers/resolver.go +++ b/cmd/frontend/internal/authz/resolvers/resolver.go @@ -54,7 +54,7 @@ func (r *Resolver) checkLicense(feature licensing.Feature) error { func NewResolver(observationCtx *observation.Context, db database.DB) graphqlbackend.AuthzResolver { return &Resolver{ - logger: observationCtx.Logger.Scoped("authz.Resolver", ""), + logger: observationCtx.Logger.Scoped("authz.Resolver"), db: db, } } diff --git a/cmd/frontend/internal/batches/httpapi/file_handler.go b/cmd/frontend/internal/batches/httpapi/file_handler.go index 562cb7dc791..b4b77b0b92a 100644 --- a/cmd/frontend/internal/batches/httpapi/file_handler.go +++ b/cmd/frontend/internal/batches/httpapi/file_handler.go @@ -42,7 +42,7 @@ type BatchesStore interface { // NewFileHandler creates a new FileHandler. func NewFileHandler(db database.DB, store BatchesStore, operations *Operations) *FileHandler { return &FileHandler{ - logger: sglog.Scoped("FileHandler", "Batch Changes mounted file REST API handler"), + logger: sglog.Scoped("FileHandler"), db: db, store: store, operations: operations, diff --git a/cmd/frontend/internal/batches/init.go b/cmd/frontend/internal/batches/init.go index 038808e96da..a6c50a47d6f 100644 --- a/cmd/frontend/internal/batches/init.go +++ b/cmd/frontend/internal/batches/init.go @@ -45,7 +45,7 @@ func Init( // Register enterprise services. 
gitserverClient := gitserver.NewClient() - logger := sglog.Scoped("Batches", "batch changes webhooks") + logger := sglog.Scoped("Batches") enterpriseServices.BatchChangesResolver = resolvers.New(db, bstore, gitserverClient, logger) enterpriseServices.BatchesGitHubWebhook = webhooks.NewGitHubWebhook(bstore, gitserverClient, logger) enterpriseServices.BatchesBitbucketServerWebhook = webhooks.NewBitbucketServerWebhook(bstore, gitserverClient, logger) diff --git a/cmd/frontend/internal/bg/delete_old_event_logs_in_postgres.go b/cmd/frontend/internal/bg/delete_old_event_logs_in_postgres.go index 775d62913c2..862b53d0de5 100644 --- a/cmd/frontend/internal/bg/delete_old_event_logs_in_postgres.go +++ b/cmd/frontend/internal/bg/delete_old_event_logs_in_postgres.go @@ -11,7 +11,7 @@ import ( ) func DeleteOldEventLogsInPostgres(ctx context.Context, logger log.Logger, db database.DB) { - logger = logger.Scoped("deleteOldEventLogs", "background job to prune old event logs in database") + logger = logger.Scoped("deleteOldEventLogs") for { // We choose 93 days as the interval to ensure that we have at least the last three months @@ -28,7 +28,7 @@ func DeleteOldEventLogsInPostgres(ctx context.Context, logger log.Logger, db dat } func DeleteOldSecurityEventLogsInPostgres(ctx context.Context, logger log.Logger, db database.DB) { - logger = logger.Scoped("deleteOldSecurityEventLogs", "background job to prune old security event logs in database") + logger = logger.Scoped("deleteOldSecurityEventLogs") for { time.Sleep(time.Hour) diff --git a/cmd/frontend/internal/bg/update_permissions.go b/cmd/frontend/internal/bg/update_permissions.go index 5b69222b325..d60613ee7dc 100644 --- a/cmd/frontend/internal/bg/update_permissions.go +++ b/cmd/frontend/internal/bg/update_permissions.go @@ -19,7 +19,7 @@ import ( // UpdatePermissions is called as part of the background process by the `frontend` service. 
func UpdatePermissions(ctx context.Context, logger log.Logger, db database.DB) { - scopedLog := logger.Scoped("permission_update", "Updates the permission in the database based on the rbac schema configuration.") + scopedLog := logger.Scoped("permission_update") err := db.WithTransact(ctx, func(tx database.DB) error { permissionStore := tx.Permissions() rolePermissionStore := tx.RolePermissions() diff --git a/cmd/frontend/internal/cli/config.go b/cmd/frontend/internal/cli/config.go index ab9cac03c13..dec4abb95b5 100644 --- a/cmd/frontend/internal/cli/config.go +++ b/cmd/frontend/internal/cli/config.go @@ -39,7 +39,7 @@ import ( ) func printConfigValidation(logger log.Logger) { - logger = logger.Scoped("configValidation", "") + logger = logger.Scoped("configValidation") messages, err := conf.Validate(conf.Raw()) if err != nil { logger.Warn("unable to validate Sourcegraph site configuration", log.Error(err)) @@ -110,7 +110,7 @@ func readSiteConfigFile(paths []string) ([]byte, error) { } func overrideSiteConfig(ctx context.Context, logger log.Logger, db database.DB) error { - logger = logger.Scoped("overrideSiteConfig", "") + logger = logger.Scoped("overrideSiteConfig") paths := filepath.SplitList(os.Getenv("SITE_CONFIG_FILE")) if len(paths) == 0 { return nil @@ -163,7 +163,7 @@ func overrideSiteConfig(ctx context.Context, logger log.Logger, db database.DB) } func overrideGlobalSettings(ctx context.Context, logger log.Logger, db database.DB) error { - logger = logger.Scoped("overrideGlobalSettings", "") + logger = logger.Scoped("overrideGlobalSettings") path := os.Getenv("GLOBAL_SETTINGS_FILE") if path == "" { return nil @@ -208,7 +208,7 @@ func overrideGlobalSettings(ctx context.Context, logger log.Logger, db database. } func overrideExtSvcConfig(ctx context.Context, logger log.Logger, db database.DB) error { - logger = logger.Scoped("overrideExtSvcConfig", "") + logger = logger.Scoped("overrideExtSvcConfig") path := os.Getenv("EXTSVC_CONFIG_FILE") if path == "" { return nil @@ -385,7 +385,7 @@ func overrideExtSvcConfig(ctx context.Context, logger log.Logger, db database.DB } func watchUpdate(ctx context.Context, logger log.Logger, update func(context.Context) (bool, error), paths ...string) { - logger = logger.Scoped("watch", "").With(log.Strings("files", paths)) + logger = logger.Scoped("watch").With(log.Strings("files", paths)) events, err := watchPaths(ctx, paths...) if err != nil { logger.Error("failed to watch config override files", log.Error(err)) @@ -463,7 +463,7 @@ func watchPaths(ctx context.Context, paths ...string) (<-chan error, error) { func newConfigurationSource(logger log.Logger, db database.DB) *configurationSource { return &configurationSource{ - logger: logger.Scoped("configurationSource", ""), + logger: logger.Scoped("configurationSource"), db: db, } } diff --git a/cmd/frontend/internal/cli/http.go b/cmd/frontend/internal/cli/http.go index 86f1c7f93c1..4fe8a079cd9 100644 --- a/cmd/frontend/internal/cli/http.go +++ b/cmd/frontend/internal/cli/http.go @@ -47,7 +47,7 @@ func newExternalHTTPHandler( newExecutorProxyHandler enterprise.NewExecutorProxyHandler, newGitHubAppSetupHandler enterprise.NewGitHubAppSetupHandler, ) (http.Handler, error) { - logger := log.Scoped("external", "external http handlers") + logger := log.Scoped("external") // Each auth middleware determines on a per-request basis whether it should be enabled (if not, it // immediately delegates the request to the next middleware in the chain). 
@@ -148,7 +148,7 @@ func newInternalHTTPHandler( rateLimitWatcher graphqlbackend.LimitWatcher, ) http.Handler { internalMux := http.NewServeMux() - logger := log.Scoped("internal", "internal http handlers") + logger := log.Scoped("internal") internalRouter := router.NewInternal(mux.NewRouter().PathPrefix("/.internal/").Subrouter()) internalhttpapi.RegisterInternalServices( diff --git a/cmd/frontend/internal/cli/serve_cmd.go b/cmd/frontend/internal/cli/serve_cmd.go index 3c1a83652bc..660a5695a5c 100644 --- a/cmd/frontend/internal/cli/serve_cmd.go +++ b/cmd/frontend/internal/cli/serve_cmd.go @@ -395,7 +395,7 @@ func makeRateLimitWatcher() (*graphqlbackend.BasicLimitWatcher, error) { return nil, err } - return graphqlbackend.NewBasicLimitWatcher(sglog.Scoped("BasicLimitWatcher", "basic rate-limiter"), store), nil + return graphqlbackend.NewBasicLimitWatcher(sglog.Scoped("BasicLimitWatcher"), store), nil } // redispoolRegisterDB registers our postgres backed redis. These package diff --git a/cmd/frontend/internal/codeintel/init.go b/cmd/frontend/internal/codeintel/init.go index f18c8d95668..a0a9aa88706 100644 --- a/cmd/frontend/internal/codeintel/init.go +++ b/cmd/frontend/internal/codeintel/init.go @@ -138,5 +138,5 @@ func Init( } func scopedContext(name string) *observation.Context { - return observation.NewContext(log.Scoped(name+".transport.graphql", "codeintel "+name+" graphql transport")) + return observation.NewContext(log.Scoped(name + ".transport.graphql")) } diff --git a/cmd/frontend/internal/codemonitors/init.go b/cmd/frontend/internal/codemonitors/init.go index e5c82d4bde0..1c069a8c3df 100644 --- a/cmd/frontend/internal/codemonitors/init.go +++ b/cmd/frontend/internal/codemonitors/init.go @@ -21,6 +21,6 @@ func Init( _ conftypes.UnifiedWatchable, enterpriseServices *enterprise.Services, ) error { - enterpriseServices.CodeMonitorsResolver = resolvers.NewResolver(log.Scoped("codeMonitorResolver", ""), db) + enterpriseServices.CodeMonitorsResolver = resolvers.NewResolver(log.Scoped("codeMonitorResolver"), db) return nil } diff --git a/cmd/frontend/internal/completions/init.go b/cmd/frontend/internal/completions/init.go index 26112473bc7..e1b55399ae8 100644 --- a/cmd/frontend/internal/completions/init.go +++ b/cmd/frontend/internal/completions/init.go @@ -24,7 +24,7 @@ func Init( _ conftypes.UnifiedWatchable, enterpriseServices *enterprise.Services, ) error { - logger := log.Scoped("completions", "Cody completions") + logger := log.Scoped("completions") enterpriseServices.NewChatCompletionsStreamHandler = func() http.Handler { completionsHandler := httpapi.NewChatCompletionsStreamHandler(logger, db) diff --git a/cmd/frontend/internal/compute/init.go b/cmd/frontend/internal/compute/init.go index 5dbf417630b..9d82c630e18 100644 --- a/cmd/frontend/internal/compute/init.go +++ b/cmd/frontend/internal/compute/init.go @@ -23,7 +23,7 @@ func Init( _ conftypes.UnifiedWatchable, enterpriseServices *enterprise.Services, ) error { - logger := log.Scoped("compute", "") + logger := log.Scoped("compute") enterpriseServices.ComputeResolver = resolvers.NewResolver(logger, db) enterpriseServices.NewComputeStreamHandler = func() http.Handler { return streaming.NewComputeStreamHandler(logger, db) diff --git a/cmd/frontend/internal/contentlibrary/init.go b/cmd/frontend/internal/contentlibrary/init.go index c36ad4beaa4..c03b35315c4 100644 --- a/cmd/frontend/internal/contentlibrary/init.go +++ b/cmd/frontend/internal/contentlibrary/init.go @@ -22,7 +22,7 @@ func Init( _ conftypes.UnifiedWatchable, 
enterpriseServices *enterprise.Services, ) error { - logger := logger.Scoped("contentlibrary", "sourcegraph content library") + logger := logger.Scoped("contentlibrary") enterpriseServices.ContentLibraryResolver = graphqlbackend.NewContentLibraryResolver(db, logger) return nil } diff --git a/cmd/frontend/internal/dotcom/init.go b/cmd/frontend/internal/dotcom/init.go index 12a0ebecea9..3ce2b3a59bf 100644 --- a/cmd/frontend/internal/dotcom/init.go +++ b/cmd/frontend/internal/dotcom/init.go @@ -51,11 +51,11 @@ func Init( if envvar.SourcegraphDotComMode() { enterpriseServices.DotcomRootResolver = dotcomRootResolver{ ProductSubscriptionLicensingResolver: productsubscription.ProductSubscriptionLicensingResolver{ - Logger: observationCtx.Logger.Scoped("productsubscriptions", "resolvers for dotcom product subscriptions"), + Logger: observationCtx.Logger.Scoped("productsubscriptions"), DB: db, }, CodyGatewayDotcomUserResolver: productsubscription.CodyGatewayDotcomUserResolver{ - Logger: observationCtx.Logger.Scoped("codygatewayuser", "resolvers for dotcom cody gateway users"), + Logger: observationCtx.Logger.Scoped("codygatewayuser"), DB: db, }, } diff --git a/cmd/frontend/internal/dotcom/productsubscription/license_anomaly.go b/cmd/frontend/internal/dotcom/productsubscription/license_anomaly.go index 86aade6815c..61280a74404 100644 --- a/cmd/frontend/internal/dotcom/productsubscription/license_anomaly.go +++ b/cmd/frontend/internal/dotcom/productsubscription/license_anomaly.go @@ -39,7 +39,7 @@ func StartCheckForAnomalousLicenseUsage(logger log.Logger, db database.DB) { client := slack.New(dotcom.SlackLicenseAnomallyWebhook) t := time.NewTicker(1 * time.Hour) - logger = logger.Scoped("StartCheckForAnomalousLicenseUsage", "starts the checks for anomalous license usage") + logger = logger.Scoped("StartCheckForAnomalousLicenseUsage") for range t.C { maybeCheckAnomalies(logger, db, client, glock.NewRealClock(), redispool.Store) diff --git a/cmd/frontend/internal/dotcom/productsubscription/license_check_handler.go b/cmd/frontend/internal/dotcom/productsubscription/license_check_handler.go index 48b8f96870f..53d0671db8a 100644 --- a/cmd/frontend/internal/dotcom/productsubscription/license_check_handler.go +++ b/cmd/frontend/internal/dotcom/productsubscription/license_check_handler.go @@ -35,7 +35,7 @@ var ( ) func logEvent(ctx context.Context, db database.DB, name string, siteID string) { - logger := log.Scoped("LicenseCheckHandler logEvent", "Event logging for LicenseCheckHandler") + logger := log.Scoped("LicenseCheckHandler logEvent") eArg, err := json.Marshal(struct { SiteID string `json:"site_id,omitempty"` }{ @@ -88,7 +88,7 @@ func sendSlackMessage(logger log.Logger, license *dbLicense, siteID string) { } func NewLicenseCheckHandler(db database.DB) http.Handler { - baseLogger := log.Scoped("LicenseCheckHandler", "Handles license validity checks") + baseLogger := log.Scoped("LicenseCheckHandler") return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() diff --git a/cmd/frontend/internal/dotcom/productsubscription/license_expiration.go b/cmd/frontend/internal/dotcom/productsubscription/license_expiration.go index 27b82c801bb..9fbc9526111 100644 --- a/cmd/frontend/internal/dotcom/productsubscription/license_expiration.go +++ b/cmd/frontend/internal/dotcom/productsubscription/license_expiration.go @@ -34,7 +34,7 @@ func StartCheckForUpcomingLicenseExpirations(logger log.Logger, db database.DB) client := slack.New(dotcom.SlackLicenseExpirationWebhook) t := 
time.NewTicker(1 * time.Hour) - logger = logger.Scoped("StartCheckForUpcomingLicenseExpirations", "starts the various checks for upcoming license expiry") + logger = logger.Scoped("StartCheckForUpcomingLicenseExpirations") for range t.C { checkLicensesIfNeeded(logger, db, client) } diff --git a/cmd/frontend/internal/executorqueue/handler/handler.go b/cmd/frontend/internal/executorqueue/handler/handler.go index 0af0d29e8c1..a08e6274d2c 100644 --- a/cmd/frontend/internal/executorqueue/handler/handler.go +++ b/cmd/frontend/internal/executorqueue/handler/handler.go @@ -83,7 +83,6 @@ func NewHandler[T workerutil.Record]( metricsStore: metricsStore, logger: log.Scoped( fmt.Sprintf("executor-queue-handler-%s", queueHandler.Name), - fmt.Sprintf("The route handler for all executor %s dbworker API tunnel endpoints", queueHandler.Name), ), queueHandler: queueHandler, } @@ -140,7 +139,7 @@ func (h *handler[T]) dequeue(ctx context.Context, queueName string, metadata exe return executortypes.Job{}, false, nil } - logger := log.Scoped("dequeue", "Select a job record from the database.") + logger := log.Scoped("dequeue") job, err := h.queueHandler.RecordTransformer(ctx, metadata.version, record, metadata.resources) if err != nil { if _, err := h.queueHandler.Store.MarkFailed(ctx, record.RecordID(), fmt.Sprintf("failed to transform record: %s", err), store.MarkFinalOptions{}); err != nil { @@ -384,7 +383,7 @@ func (h *handler[T]) heartbeat(ctx context.Context, executor types.Executor, ids return nil, nil, err } - logger := log.Scoped("heartbeat", "Write this heartbeat to the database") + logger := log.Scoped("heartbeat") // Write this heartbeat to the database so that we can populate the UI with recent executor activity. if err := h.executorStore.UpsertHeartbeat(ctx, executor); err != nil { diff --git a/cmd/frontend/internal/executorqueue/handler/multihandler.go b/cmd/frontend/internal/executorqueue/handler/multihandler.go index 840ded94ee4..737a6cc52db 100644 --- a/cmd/frontend/internal/executorqueue/handler/multihandler.go +++ b/cmd/frontend/internal/executorqueue/handler/multihandler.go @@ -62,7 +62,7 @@ func NewMultiHandler( BatchesQueueHandler: batchesQueueHandler, DequeueCache: dequeueCache, dequeueCacheConfig: dequeueCacheConfig, - logger: log.Scoped("executor-multi-queue-handler", "The route handler for all executor queues"), + logger: log.Scoped("executor-multi-queue-handler"), } return multiHandler } @@ -143,7 +143,7 @@ func (m *MultiHandler) dequeue(ctx context.Context, req executortypes.DequeueReq DiskSpace: req.DiskSpace, } - logger := m.logger.Scoped("dequeue", "Pick a job record from the database.") + logger := m.logger.Scoped("dequeue") var job executortypes.Job switch selectedQueue { case m.BatchesQueueHandler.Name: @@ -195,7 +195,7 @@ func (m *MultiHandler) dequeue(ctx context.Context, req executortypes.DequeueReq job.Version = 2 } - logger = m.logger.Scoped("token", "Create or regenerate a job token.") + logger = m.logger.Scoped("token") token, err := m.jobTokenStore.Create(ctx, job.ID, job.Queue, job.RepositoryName) if err != nil { if errors.Is(err, executorstore.ErrJobTokenAlreadyCreated) { @@ -361,7 +361,7 @@ func (m *MultiHandler) heartbeat(ctx context.Context, executor types.Executor, i ) } - logger := log.Scoped("multiqueue.heartbeat", "Write the heartbeat of multiple queues to the database") + logger := log.Scoped("multiqueue.heartbeat") // Write this heartbeat to the database so that we can populate the UI with recent executor activity. 
if err = m.executorStore.UpsertHeartbeat(ctx, executor); err != nil { diff --git a/cmd/frontend/internal/executorqueue/init.go b/cmd/frontend/internal/executorqueue/init.go index b2957c179f7..1e52cad5c2b 100644 --- a/cmd/frontend/internal/executorqueue/init.go +++ b/cmd/frontend/internal/executorqueue/init.go @@ -30,7 +30,7 @@ func Init( return conf.SiteConfig().ExecutorsAccessToken } - logger := log.Scoped("executorqueue", "") + logger := log.Scoped("executorqueue") queueHandler := newExecutorQueuesHandler( observationCtx, diff --git a/cmd/frontend/internal/executorqueue/queues/batches/queue.go b/cmd/frontend/internal/executorqueue/queues/batches/queue.go index 83eb4af1d2f..555aa985510 100644 --- a/cmd/frontend/internal/executorqueue/queues/batches/queue.go +++ b/cmd/frontend/internal/executorqueue/queues/batches/queue.go @@ -14,7 +14,7 @@ import ( ) func QueueHandler(observationCtx *observation.Context, db database.DB, _ func() string) handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob] { - logger := log.Scoped("executor-queue.batches", "The executor queue handlers for the batches queue") + logger := log.Scoped("executor-queue.batches") recordTransformer := func(ctx context.Context, version string, record *btypes.BatchSpecWorkspaceExecutionJob, _ handler.ResourceMetadata) (apiclient.Job, error) { batchesStore := bstore.New(db, observationCtx, nil) return transformRecord(ctx, logger, batchesStore, record, version) diff --git a/cmd/frontend/internal/httpapi/auth.go b/cmd/frontend/internal/httpapi/auth.go index 3144eb96660..bad0cee24f2 100644 --- a/cmd/frontend/internal/httpapi/auth.go +++ b/cmd/frontend/internal/httpapi/auth.go @@ -29,7 +29,7 @@ const authAuditEntity = "httpapi/auth" // AccessTokenAuthMiddleware authenticates the user based on the // token query parameter or the "Authorization" header. func AccessTokenAuthMiddleware(db database.DB, baseLogger log.Logger, next http.Handler) http.Handler { - baseLogger = baseLogger.Scoped("accessTokenAuth", "Access token authentication middleware") + baseLogger = baseLogger.Scoped("accessTokenAuth") return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { // SCIM uses an auth token which is checked separately in the SCIM package. 
if strings.HasPrefix(r.URL.Path, "/.api/scim/v2") { diff --git a/cmd/frontend/internal/httpapi/httpapi.go b/cmd/frontend/internal/httpapi/httpapi.go index 7c2c1e3640d..453f4b6f981 100644 --- a/cmd/frontend/internal/httpapi/httpapi.go +++ b/cmd/frontend/internal/httpapi/httpapi.go @@ -99,7 +99,7 @@ func NewHandler( rateLimiter graphqlbackend.LimitWatcher, handlers *Handlers, ) (http.Handler, error) { - logger := sglog.Scoped("Handler", "frontend HTTP API handler") + logger := sglog.Scoped("Handler") if m == nil { m = apirouter.New(nil) @@ -123,7 +123,7 @@ func NewHandler( ) wh := webhooks.Router{ - Logger: logger.Scoped("webhooks.Router", "handling webhook requests and dispatching them to handlers"), + Logger: logger.Scoped("webhooks.Router"), DB: db, } webhookhandlers.Init(&wh) @@ -222,7 +222,7 @@ func RegisterInternalServices( newComputeStreamHandler enterprise.NewComputeStreamHandler, rateLimitWatcher graphqlbackend.LimitWatcher, ) { - logger := sglog.Scoped("InternalHandler", "frontend internal HTTP API handler") + logger := sglog.Scoped("InternalHandler") m.StrictSlash(true) handler := JsonMiddleware(&ErrorHandler{ @@ -235,7 +235,7 @@ func RegisterInternalServices( gsClient := gitserver.NewClient() indexer := &searchIndexerServer{ db: db, - logger: logger.Scoped("searchIndexerServer", "zoekt-indexserver endpoints"), + logger: logger.Scoped("searchIndexerServer"), gitserverClient: gsClient, ListIndexable: backend.NewRepos(logger, db, gsClient).ListIndexable, RepoStore: db.Repos(), diff --git a/cmd/frontend/internal/httpapi/releasecache/cache.go b/cmd/frontend/internal/httpapi/releasecache/cache.go index 606503f7267..1092bcd4ab6 100644 --- a/cmd/frontend/internal/httpapi/releasecache/cache.go +++ b/cmd/frontend/internal/httpapi/releasecache/cache.go @@ -38,7 +38,7 @@ type releaseCache struct { func newReleaseCache(logger log.Logger, client *github.V4Client, owner, name string) ReleaseCache { return &releaseCache{ client: client, - logger: logger.Scoped("ReleaseCache", "release cache"), + logger: logger.Scoped("ReleaseCache"), branches: map[string]string{}, owner: owner, name: name, diff --git a/cmd/frontend/internal/httpapi/releasecache/http.go b/cmd/frontend/internal/httpapi/releasecache/http.go index f4f13f393c6..b3d0ac18f43 100644 --- a/cmd/frontend/internal/httpapi/releasecache/http.go +++ b/cmd/frontend/internal/httpapi/releasecache/http.go @@ -38,10 +38,10 @@ type handler struct { func NewHandler(logger log.Logger) http.Handler { ctx := context.Background() - logger = logger.Scoped("srcclicache", "src-cli release cache") + logger = logger.Scoped("srcclicache") handler := &handler{ - logger: logger.Scoped("handler", "src-cli release cache HTTP handler"), + logger: logger.Scoped("handler"), } // We'll build all the state up in a conf watcher, since the behaviour of diff --git a/cmd/frontend/internal/httpapi/repo_refresh.go b/cmd/frontend/internal/httpapi/repo_refresh.go index e1a1e813df7..ad4e40d164c 100644 --- a/cmd/frontend/internal/httpapi/repo_refresh.go +++ b/cmd/frontend/internal/httpapi/repo_refresh.go @@ -13,7 +13,7 @@ import ( ) func serveRepoRefresh(db database.DB) func(http.ResponseWriter, *http.Request) error { - logger := log.Scoped("serveRepoRefresh", "") + logger := log.Scoped("serveRepoRefresh") return func(w http.ResponseWriter, r *http.Request) error { ctx := r.Context() diff --git a/cmd/frontend/internal/httpapi/src_cli.go b/cmd/frontend/internal/httpapi/src_cli.go index 1b0c5951b84..84082508500 100644 --- a/cmd/frontend/internal/httpapi/src_cli.go +++ 
b/cmd/frontend/internal/httpapi/src_cli.go @@ -60,7 +60,7 @@ func newSrcCliVersionHandler(logger log.Logger) http.Handler { return &srcCliVersionHandler{ clock: glock.NewRealClock(), doer: httpcli.ExternalClient, - logger: logger.Scoped("srcCliVersionHandler", "HTTP handler for src-cli versions and downloads"), + logger: logger.Scoped("srcCliVersionHandler"), maxStale: srcCliCacheLifetime, } } diff --git a/cmd/frontend/internal/httpapi/webhookhandlers/handlers.go b/cmd/frontend/internal/httpapi/webhookhandlers/handlers.go index e43871355eb..43340649335 100644 --- a/cmd/frontend/internal/httpapi/webhookhandlers/handlers.go +++ b/cmd/frontend/internal/httpapi/webhookhandlers/handlers.go @@ -8,7 +8,7 @@ import ( ) func Init(w *webhooks.Router) { - logger := log.Scoped("webhookhandlers", "handling webhook events for authz events") + logger := log.Scoped("webhookhandlers") // Refer to https://docs.github.com/en/developers/webhooks-and-events/webhooks/webhook-events-and-payloads // for event types diff --git a/cmd/frontend/internal/insights/resolvers/admin_resolver.go b/cmd/frontend/internal/insights/resolvers/admin_resolver.go index e93b5d7fc84..2c7bf12852c 100644 --- a/cmd/frontend/internal/insights/resolvers/admin_resolver.go +++ b/cmd/frontend/internal/insights/resolvers/admin_resolver.go @@ -403,7 +403,7 @@ func (r *Resolver) InsightAdminBackfillQueue(ctx context.Context, args *graphqlb store := &adminBackfillQueueConnectionStore{ args: args, backfillStore: scheduler.NewBackfillStore(r.insightsDB), - logger: r.logger.Scoped("backfillqueue", "insights admin backfill queue resolver"), + logger: r.logger.Scoped("backfillqueue"), mainDB: r.postgresDB, } diff --git a/cmd/frontend/internal/insights/resolvers/aggregates_resolvers.go b/cmd/frontend/internal/insights/resolvers/aggregates_resolvers.go index f9728aa092e..b5209efe754 100644 --- a/cmd/frontend/internal/insights/resolvers/aggregates_resolvers.go +++ b/cmd/frontend/internal/insights/resolvers/aggregates_resolvers.go @@ -62,7 +62,7 @@ type searchAggregateResolver struct { func (r *searchAggregateResolver) getLogger() log.Logger { if r.logger == nil { - r.logger = log.Scoped("searchAggregations", "") + r.logger = log.Scoped("searchAggregations") } return r.logger } diff --git a/cmd/frontend/internal/insights/resolvers/insight_series_resolver.go b/cmd/frontend/internal/insights/resolvers/insight_series_resolver.go index 0051bc9ed4e..69a303ff24f 100644 --- a/cmd/frontend/internal/insights/resolvers/insight_series_resolver.go +++ b/cmd/frontend/internal/insights/resolvers/insight_series_resolver.go @@ -177,7 +177,7 @@ func (p *precalculatedInsightSeriesResolver) Label() string { func (p *precalculatedInsightSeriesResolver) Points(ctx context.Context, _ *graphqlbackend.InsightsPointsArgs) ([]graphqlbackend.InsightsDataPointResolver, error) { resolvers := make([]graphqlbackend.InsightsDataPointResolver, 0, len(p.points)) - db := database.NewDBWith(log.Scoped("Points", ""), p.workerBaseStore) + db := database.NewDBWith(log.Scoped("Points"), p.workerBaseStore) scHandler := store.NewSearchContextHandler(db) modifiedPoints := removeClosePoints(p.points, p.series) filterRepoIncludes := []string{} @@ -364,7 +364,7 @@ func getRecordedSeriesPointOpts(ctx context.Context, db database.DB, timeseriesS var loadingStrategyRED = metrics.NewREDMetrics(prometheus.DefaultRegisterer, "src_insights_loading_strategy", metrics.WithLabels("in_mem", "capture")) func fetchSeries(ctx context.Context, definition types.InsightViewSeries, filters types.InsightViewFilters, 
options types.SeriesDisplayOptions, r *baseInsightResolver) (points []store.SeriesPoint, err error) { - opts, err := getRecordedSeriesPointOpts(ctx, database.NewDBWith(log.Scoped("recordedSeries", ""), r.postgresDB), r.timeSeriesStore, definition, filters, options) + opts, err := getRecordedSeriesPointOpts(ctx, database.NewDBWith(log.Scoped("recordedSeries"), r.postgresDB), r.timeSeriesStore, definition, filters, options) if err != nil { return nil, errors.Wrap(err, "getRecordedSeriesPointOpts") } diff --git a/cmd/frontend/internal/insights/resolvers/live_preview_resolvers.go b/cmd/frontend/internal/insights/resolvers/live_preview_resolvers.go index 9d509235948..516b4d13b24 100644 --- a/cmd/frontend/internal/insights/resolvers/live_preview_resolvers.go +++ b/cmd/frontend/internal/insights/resolvers/live_preview_resolvers.go @@ -170,7 +170,7 @@ func (s *searchInsightLivePreviewSeriesResolver) Label(ctx context.Context) (str func getPreviewRepos(ctx context.Context, repoScope graphqlbackend.RepositoryScopeInput, logger log.Logger) ([]string, error) { var repos []string if repoScope.RepositoryCriteria != nil { - repoQueryExecutor := query.NewStreamingRepoQueryExecutor(logger.Scoped("live_preview_resolver", "")) + repoQueryExecutor := query.NewStreamingRepoQueryExecutor(logger.Scoped("live_preview_resolver")) repoQuery, err := querybuilder.RepositoryScopeQuery(*repoScope.RepositoryCriteria) if err != nil { return nil, err diff --git a/cmd/frontend/internal/insights/resolvers/resolver.go b/cmd/frontend/internal/insights/resolvers/resolver.go index 979543bdad0..0f4f6fc34aa 100644 --- a/cmd/frontend/internal/insights/resolvers/resolver.go +++ b/cmd/frontend/internal/insights/resolvers/resolver.go @@ -80,12 +80,12 @@ func New(db edb.InsightsDB, postgres database.DB) graphqlbackend.InsightsResolve func newWithClock(db edb.InsightsDB, postgres database.DB, clock func() time.Time) *Resolver { base := WithBase(db, postgres, clock) return &Resolver{ - logger: log.Scoped("Resolver", ""), + logger: log.Scoped("Resolver"), baseInsightResolver: *base, timeSeriesStore: base.timeSeriesStore, insightMetadataStore: base.insightStore, dataSeriesStore: base.insightStore, - insightEnqueuer: background.NewInsightEnqueuer(clock, base.workerBaseStore, log.Scoped("resolver insight enqueuer", "")), + insightEnqueuer: background.NewInsightEnqueuer(clock, base.workerBaseStore, log.Scoped("resolver insight enqueuer")), } } @@ -126,7 +126,7 @@ type AggregationResolver struct { func NewAggregationResolver(observationCtx *observation.Context, postgres database.DB) graphqlbackend.InsightsAggregationResolver { return &AggregationResolver{ - logger: log.Scoped("AggregationResolver", ""), + logger: log.Scoped("AggregationResolver"), postgresDB: postgres, operations: newAggregationsOperations(observationCtx), } diff --git a/cmd/frontend/internal/insights/resolvers/scoped_insight_resolvers.go b/cmd/frontend/internal/insights/resolvers/scoped_insight_resolvers.go index 017d18e1470..5e2dc9c7577 100644 --- a/cmd/frontend/internal/insights/resolvers/scoped_insight_resolvers.go +++ b/cmd/frontend/internal/insights/resolvers/scoped_insight_resolvers.go @@ -52,7 +52,7 @@ func (r *Resolver) PreviewRepositoriesFromQuery(ctx context.Context, args graphq return nil, errors.Wrap(err, "could not build repository scope query") } - executor := query.NewStreamingRepoQueryExecutor(r.logger.Scoped("StreamingRepoQueryExecutor", "preview repositories")) + executor := query.NewStreamingRepoQueryExecutor(r.logger.Scoped("StreamingRepoQueryExecutor")) 
repos, err := executor.ExecuteRepoList(ctx, repoScopeQuery.String()) if err != nil { return nil, errors.Wrap(err, "executing the repository search errored") diff --git a/cmd/frontend/internal/licensing/init/init.go b/cmd/frontend/internal/licensing/init/init.go index f590014e839..3cfe9d65f9c 100644 --- a/cmd/frontend/internal/licensing/init/init.go +++ b/cmd/frontend/internal/licensing/init/init.go @@ -50,7 +50,7 @@ func Init( // services when the max is reached. database.BeforeCreateExternalService = enforcement.NewBeforeCreateExternalServiceHook() - logger := log.Scoped("licensing", "licensing enforcement") + logger := log.Scoped("licensing") // Surface basic, non-sensitive information about the license type. This information // can be used to soft-gate features from the UI, and to provide info to admins from diff --git a/cmd/frontend/internal/own/init.go b/cmd/frontend/internal/own/init.go index 637c746595f..d3a368bde53 100644 --- a/cmd/frontend/internal/own/init.go +++ b/cmd/frontend/internal/own/init.go @@ -23,6 +23,6 @@ func Init( enterpriseServices *enterprise.Services, ) error { g := gitserver.NewClient() - enterpriseServices.OwnResolver = resolvers.New(db, g, observationCtx.Logger.Scoped("own", "Code ownership")) + enterpriseServices.OwnResolver = resolvers.New(db, g, observationCtx.Logger.Scoped("own")) return nil } diff --git a/cmd/frontend/internal/rbac/init.go b/cmd/frontend/internal/rbac/init.go index a396e5f6cbc..2cc48bd7fb3 100644 --- a/cmd/frontend/internal/rbac/init.go +++ b/cmd/frontend/internal/rbac/init.go @@ -22,7 +22,7 @@ func Init( _ conftypes.UnifiedWatchable, enterpriseServices *enterprise.Services, ) error { - logger := log.Scoped("rbac", "") + logger := log.Scoped("rbac") enterpriseServices.RBACResolver = resolvers.New(logger, db) return nil diff --git a/cmd/frontend/internal/repos/webhooks/handlers.go b/cmd/frontend/internal/repos/webhooks/handlers.go index 4b8d7185b88..bd76922f295 100644 --- a/cmd/frontend/internal/repos/webhooks/handlers.go +++ b/cmd/frontend/internal/repos/webhooks/handlers.go @@ -48,7 +48,7 @@ type GitHubHandler struct { func NewGitHubHandler() *GitHubHandler { return &GitHubHandler{ - logger: log.Scoped("webhooks.GitHubHandler", "github webhook handler"), + logger: log.Scoped("webhooks.GitHubHandler"), } } @@ -75,7 +75,7 @@ type GitLabHandler struct { func NewGitLabHandler() *GitLabHandler { return &GitLabHandler{ - logger: log.Scoped("webhooks.GitLabHandler", "gitlab webhook handler"), + logger: log.Scoped("webhooks.GitLabHandler"), } } @@ -102,7 +102,7 @@ type BitbucketServerHandler struct { func NewBitbucketServerHandler() *BitbucketServerHandler { return &BitbucketServerHandler{ - logger: log.Scoped("webhooks.BitbucketServerHandler", "bitbucket server webhook handler"), + logger: log.Scoped("webhooks.BitbucketServerHandler"), } } @@ -136,7 +136,7 @@ type BitbucketCloudHandler struct { func NewBitbucketCloudHandler() *BitbucketCloudHandler { return &BitbucketCloudHandler{ - logger: log.Scoped("webhooks.BitbucketCloudHandler", "bitbucket cloud webhook handler"), + logger: log.Scoped("webhooks.BitbucketCloudHandler"), } } diff --git a/cmd/frontend/internal/search/search.go b/cmd/frontend/internal/search/search.go index 1a7f1474e37..ea1adc3be27 100644 --- a/cmd/frontend/internal/search/search.go +++ b/cmd/frontend/internal/search/search.go @@ -39,7 +39,7 @@ import ( // StreamHandler is an http handler which streams back search results. 
func StreamHandler(db database.DB) http.Handler { - logger := log.Scoped("searchStreamHandler", "") + logger := log.Scoped("searchStreamHandler") return &streamHandler{ logger: logger, db: db, diff --git a/cmd/frontend/internal/telemetry/init.go b/cmd/frontend/internal/telemetry/init.go index eb293481079..759bf77a1a2 100644 --- a/cmd/frontend/internal/telemetry/init.go +++ b/cmd/frontend/internal/telemetry/init.go @@ -24,7 +24,7 @@ func Init( ) error { enterpriseServices.TelemetryRootResolver = &graphqlbackend.TelemetryRootResolver{ Resolver: resolvers.New( - observationCtx.Logger.Scoped("telemetry", "Telemetry V2 resolver"), + observationCtx.Logger.Scoped("telemetry"), db), } diff --git a/cmd/frontend/shared/shared.go b/cmd/frontend/shared/shared.go index 4871e3ce873..77845790f3e 100644 --- a/cmd/frontend/shared/shared.go +++ b/cmd/frontend/shared/shared.go @@ -78,7 +78,7 @@ var initFunctions = map[string]EnterpriseInitializer{ } func EnterpriseSetupHook(db database.DB, conf conftypes.UnifiedWatchable) enterprise.Services { - logger := log.Scoped("enterprise", "frontend enterprise edition") + logger := log.Scoped("enterprise") debug, _ := strconv.ParseBool(os.Getenv("DEBUG")) if debug { logger.Debug("enterprise edition") diff --git a/cmd/frontend/webhooks/github_webhooks.go b/cmd/frontend/webhooks/github_webhooks.go index f12598f8a4e..8142d906590 100644 --- a/cmd/frontend/webhooks/github_webhooks.go +++ b/cmd/frontend/webhooks/github_webhooks.go @@ -25,7 +25,7 @@ type GitHubWebhook struct { } func (h *GitHubWebhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { - logger := log.Scoped("ServeGitHubWebhook", "direct endpoint for github webhook") + logger := log.Scoped("ServeGitHubWebhook") body, err := io.ReadAll(r.Body) if err != nil { log15.Error("Error parsing github webhook event", "error", err) diff --git a/cmd/frontend/webhooks/webhooks.go b/cmd/frontend/webhooks/webhooks.go index ee6f86b3e7f..1a032e53c5a 100644 --- a/cmd/frontend/webhooks/webhooks.go +++ b/cmd/frontend/webhooks/webhooks.go @@ -92,7 +92,7 @@ func NewHandler(logger log.Logger, db database.DB, gh *Router) http.Handler { type Handler func(ctx context.Context, db database.DB, codeHostURN extsvc.CodeHostBaseURL, event any) error func handler(logger log.Logger, db database.DB, wh *Router) http.HandlerFunc { - logger = logger.Scoped("webhooks.handler", "handler used to route webhooks") + logger = logger.Scoped("webhooks.handler") return func(w http.ResponseWriter, r *http.Request) { uuidString := mux.Vars(r)["webhook_uuid"] if uuidString == "" { diff --git a/cmd/gitserver/internal/cleanup.go b/cmd/gitserver/internal/cleanup.go index ef3bbe8bb81..5c1ab6280bb 100644 --- a/cmd/gitserver/internal/cleanup.go +++ b/cmd/gitserver/internal/cleanup.go @@ -67,7 +67,7 @@ func NewJanitor(ctx context.Context, cfg JanitorConfig, db database.DB, rcf *wre // ASAP. 
if envvar.SourcegraphDotComMode() { diskSizer := &StatDiskSizer{} - logger := logger.Scoped("dotcom-repo-cleaner", "The background janitor process to clean up repos on Sourcegraph.com that haven't been changed in a long time") + logger := logger.Scoped("dotcom-repo-cleaner") toFree, err := howManyBytesToFree(logger, cfg.ReposDir, diskSizer, cfg.DesiredPercentFree) if err != nil { logger.Error("ensuring free disk space", log.Error(err)) @@ -245,7 +245,7 @@ func cleanupRepos( cloneRepo cloneRepoFunc, gitServerAddrs gitserver.GitserverAddresses, ) { - logger = logger.Scoped("cleanup", "repositories cleanup operation") + logger = logger.Scoped("cleanup") janitorRunning.Set(1) defer janitorRunning.Set(0) @@ -682,7 +682,7 @@ func freeUpSpace(ctx context.Context, logger log.Logger, db database.DB, shardID return nil } - logger = logger.Scoped("freeUpSpace", "removes git directories under ReposDir") + logger = logger.Scoped("freeUpSpace") // Get the git directories and their mod times. gitDirs, err := findGitDirs(reposDir) diff --git a/cmd/gitserver/internal/executil/executil.go b/cmd/gitserver/internal/executil/executil.go index a6cfdb546cb..c6bdde261b0 100644 --- a/cmd/gitserver/internal/executil/executil.go +++ b/cmd/gitserver/internal/executil/executil.go @@ -255,7 +255,7 @@ func getTlsExternalDoNotInvoke() *tlsConfig { exp := conf.ExperimentalFeatures() c := exp.TlsExternal - logger := log.Scoped("tlsExternal", "Global TLS/SSL settings for Sourcegraph to use when communicating with code hosts.") + logger := log.Scoped("tlsExternal") if c == nil { return &tlsConfig{} diff --git a/cmd/gitserver/internal/git/cleanup.go b/cmd/gitserver/internal/git/cleanup.go index f4d0b39ccd6..3bcdd4702cd 100644 --- a/cmd/gitserver/internal/git/cleanup.go +++ b/cmd/gitserver/internal/git/cleanup.go @@ -20,7 +20,7 @@ import ( // very slow. Removing these files while they're in use will cause // an operation to fail, but not damage the repository. func CleanTmpPackFiles(logger log.Logger, dir common.GitDir) { - logger = logger.Scoped("cleanup.cleanTmpFiles", "tries to remove tmp_pack_* files from .git/objects/pack") + logger = logger.Scoped("cleanup.cleanTmpFiles") now := time.Now() packdir := dir.Path("objects", "pack") diff --git a/cmd/gitserver/internal/git/git.go b/cmd/gitserver/internal/git/git.go index a604947a19f..d2bea61645f 100644 --- a/cmd/gitserver/internal/git/git.go +++ b/cmd/gitserver/internal/git/git.go @@ -193,7 +193,7 @@ var badRefs = syncx.OnceValue(func() []string { // If there are no commits or the latest commit is in the future, or there is any // error, time.Now is returned. func LatestCommitTimestamp(logger log.Logger, dir common.GitDir) time.Time { - logger = logger.Scoped("LatestCommitTimestamp", "compute the timestamp of the most recent commit"). + logger = logger.Scoped("LatestCommitTimestamp"). With(log.String("repo", string(dir))) now := time.Now() // return current time if we don't find a more accurate time diff --git a/cmd/gitserver/internal/gitserverfs/initfs.go b/cmd/gitserver/internal/gitserverfs/initfs.go index 17a33ab4d08..b59c1532c36 100644 --- a/cmd/gitserver/internal/gitserverfs/initfs.go +++ b/cmd/gitserver/internal/gitserverfs/initfs.go @@ -43,7 +43,7 @@ func InitGitserverFileSystem(logger log.Logger, reposDir string) error { // setupAndClearTmp sets up the tempdir for reposDir as well as clearing it // out. It returns the temporary directory location. 
func setupAndClearTmp(logger log.Logger, reposDir string) (string, error) { - logger = logger.Scoped("setupAndClearTmp", "sets up the the tempdir for ReposDir as well as clearing it out") + logger = logger.Scoped("setupAndClearTmp") // Additionally, we create directories with the prefix .tmp-old which are // asynchronously removed. We do not remove in place since it may be a diff --git a/cmd/gitserver/internal/gitservice.go b/cmd/gitserver/internal/gitservice.go index 2ecf2b33f9c..0ab2edf0704 100644 --- a/cmd/gitserver/internal/gitservice.go +++ b/cmd/gitserver/internal/gitservice.go @@ -72,7 +72,7 @@ func flowrateWriter(logger log.Logger, w io.Writer) io.Writer { } func (s *Server) gitServiceHandler() *gitservice.Handler { - logger := s.Logger.Scoped("gitServiceHandler", "smart Git HTTP transfer protocol") + logger := s.Logger.Scoped("gitServiceHandler") return &gitservice.Handler{ Dir: func(d string) string { diff --git a/cmd/gitserver/internal/integration_tests/test_utils.go b/cmd/gitserver/internal/integration_tests/test_utils.go index d81e800d243..ba52d5e1e38 100644 --- a/cmd/gitserver/internal/integration_tests/test_utils.go +++ b/cmd/gitserver/internal/integration_tests/test_utils.go @@ -46,7 +46,7 @@ func InitGitserver() { // Ignore users configuration in tests os.Setenv("GIT_CONFIG_NOSYSTEM", "true") os.Setenv("HOME", "/dev/null") - logger := sglog.Scoped("gitserver_integration_tests", "") + logger := sglog.Scoped("gitserver_integration_tests") l, err := net.Listen("tcp", "127.0.0.1:0") if err != nil { @@ -74,7 +74,7 @@ func InitGitserver() { db.ReposFunc.SetDefaultReturn(r) s := server.Server{ - Logger: sglog.Scoped("server", "the gitserver service"), + Logger: sglog.Scoped("server"), ObservationCtx: &observation.TestContext, ReposDir: filepath.Join(root, "repos"), GetRemoteURLFunc: func(ctx context.Context, name api.RepoName) (string, error) { diff --git a/cmd/gitserver/internal/p4exec.go b/cmd/gitserver/internal/p4exec.go index b0371ed5850..19d0b6b1454 100644 --- a/cmd/gitserver/internal/p4exec.go +++ b/cmd/gitserver/internal/p4exec.go @@ -169,7 +169,7 @@ func (s *Server) handleP4Exec(w http.ResponseWriter, r *http.Request) { } func (s *Server) p4execHTTP(w http.ResponseWriter, r *http.Request, req *p4ExecRequest) { - logger := s.Logger.Scoped("p4exec", "") + logger := s.Logger.Scoped("p4exec") // Flush writes more aggressively than standard net/http so that clients // with a context deadline see as much partial response body as possible. diff --git a/cmd/gitserver/internal/p4exec_test.go b/cmd/gitserver/internal/p4exec_test.go index ce595af1626..ae28136015d 100644 --- a/cmd/gitserver/internal/p4exec_test.go +++ b/cmd/gitserver/internal/p4exec_test.go @@ -63,7 +63,7 @@ func TestServer_handleP4Exec(t *testing.T) { srv := httptest.NewServer(handler) u, _ := url.Parse(srv.URL) - conn, err := defaults.Dial(u.Host, logger.Scoped("gRPC client", "")) + conn, err := defaults.Dial(u.Host, logger.Scoped("gRPC client")) if err != nil { t.Fatalf("failed to dial: %v", err) } diff --git a/cmd/gitserver/internal/patch.go b/cmd/gitserver/internal/patch.go index 3901a236ea9..5af9e0858a6 100644 --- a/cmd/gitserver/internal/patch.go +++ b/cmd/gitserver/internal/patch.go @@ -56,7 +56,7 @@ func (s *Server) handleCreateCommitFromPatchBinary(w http.ResponseWriter, r *htt } func (s *Server) createCommitFromPatch(ctx context.Context, req protocol.CreateCommitFromPatchRequest) (int, protocol.CreateCommitFromPatchResponse) { - logger := s.Logger.Scoped("createCommitFromPatch", ""). 
+ logger := s.Logger.Scoped("createCommitFromPatch"). With( log.String("repo", string(req.Repo)), log.String("baseCommit", string(req.BaseCommit)), @@ -426,7 +426,7 @@ func (s *Server) shelveChangelist(ctx context.Context, req protocol.CreateCommit _, _, _, p4depot, _ = perforce.DecomposePerforceRemoteURL(remoteURL) } - logger := s.Logger.Scoped("shelveChangelist", ""). + logger := s.Logger.Scoped("shelveChangelist"). With( log.String("repo", repo), log.String("baseCommit", baseCommit), diff --git a/cmd/gitserver/internal/perforce/perforce.go b/cmd/gitserver/internal/perforce/perforce.go index 217e61bdd65..9f942645903 100644 --- a/cmd/gitserver/internal/perforce/perforce.go +++ b/cmd/gitserver/internal/perforce/perforce.go @@ -125,7 +125,7 @@ func (s *Service) changelistMappingProducer(ctx context.Context, tasks chan<- *c // changelistMappingConsumer "consumes" jobs "produced" by the producer. func (s *Service) changelistMappingConsumer(ctx context.Context, tasks <-chan *changelistMappingTask) { - logger := s.Logger.Scoped("changelistMappingConsumer", "process perforce changelist mapping jobs") + logger := s.Logger.Scoped("changelistMappingConsumer") // Process only one job at a time for a simpler pipeline at the moment. for task := range tasks { @@ -154,7 +154,7 @@ func (s *Service) changelistMappingConsumer(ctx context.Context, tasks <-chan *c // doChangelistMapping performs the commits -> changelist ID mapping for a new or existing repo. func (s *Service) doChangelistMapping(ctx context.Context, job *changelistMappingJob) error { - logger := s.Logger.Scoped("doChangelistMapping", "").With( + logger := s.Logger.Scoped("doChangelistMapping").With( log.String("repo", string(job.RepoName)), ) diff --git a/cmd/gitserver/internal/server.go b/cmd/gitserver/internal/server.go index b4b84ca074c..97010bb7f85 100644 --- a/cmd/gitserver/internal/server.go +++ b/cmd/gitserver/internal/server.go @@ -230,7 +230,7 @@ func shortGitCommandSlow(args []string) time.Duration { // https://github.com/sourcegraph/sourcegraph/pull/27931. func headerXRequestedWithMiddleware(next http.Handler) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - l := log.Scoped("gitserver", "headerXRequestedWithMiddleware") + l := log.Scoped("gitserver") // Do not apply the middleware to /ping and /git endpoints. // @@ -280,19 +280,19 @@ func (s *Server) Handler() http.Handler { mux := http.NewServeMux() mux.HandleFunc("/archive", trace.WithRouteName("archive", accesslog.HTTPMiddleware( - s.Logger.Scoped("archive.accesslog", "archive endpoint access log"), + s.Logger.Scoped("archive.accesslog"), conf.DefaultClient(), s.handleArchive, ))) mux.HandleFunc("/exec", trace.WithRouteName("exec", accesslog.HTTPMiddleware( - s.Logger.Scoped("exec.accesslog", "exec endpoint access log"), + s.Logger.Scoped("exec.accesslog"), conf.DefaultClient(), s.handleExec, ))) mux.HandleFunc("/search", trace.WithRouteName("search", s.handleSearch)) mux.HandleFunc("/batch-log", trace.WithRouteName("batch-log", s.handleBatchLog)) mux.HandleFunc("/p4-exec", trace.WithRouteName("p4-exec", accesslog.HTTPMiddleware( - s.Logger.Scoped("p4-exec.accesslog", "p4-exec endpoint access log"), + s.Logger.Scoped("p4-exec.accesslog"), conf.DefaultClient(), s.handleP4Exec, ))) @@ -325,7 +325,7 @@ func (s *Server) Handler() http.Handler { // scaling events and the new destination gitserver replica can directly clone from // the gitserver replica which hosts the repository currently. 
mux.HandleFunc("/git/", trace.WithRouteName("git", accesslog.HTTPMiddleware( - s.Logger.Scoped("git.accesslog", "git endpoint access log"), + s.Logger.Scoped("git.accesslog"), conf.DefaultClient(), func(rw http.ResponseWriter, r *http.Request) { http.StripPrefix("/git", s.gitServiceHandler()).ServeHTTP(rw, r) @@ -412,7 +412,7 @@ func (p *clonePipelineRoutine) cloneJobProducer(ctx context.Context, tasks chan< } func (p *clonePipelineRoutine) cloneJobConsumer(ctx context.Context, tasks <-chan *cloneTask) { - logger := p.s.Logger.Scoped("cloneJobConsumer", "process clone jobs") + logger := p.s.Logger.Scoped("cloneJobConsumer") for task := range tasks { logger := logger.With(log.String("job.repo", string(task.repo))) @@ -581,7 +581,7 @@ func (s *Server) handleRepoUpdate(w http.ResponseWriter, r *http.Request) { } func (s *Server) repoUpdate(req *protocol.RepoUpdateRequest) protocol.RepoUpdateResponse { - logger := s.Logger.Scoped("handleRepoUpdate", "synchronous http handler for repo updates") + logger := s.Logger.Scoped("handleRepoUpdate") var resp protocol.RepoUpdateResponse req.Repo = protocol.NormalizeRepo(req.Repo) dir := gitserverfs.RepoDirFromName(s.ReposDir, req.Repo) @@ -642,7 +642,7 @@ func (s *Server) repoUpdate(req *protocol.RepoUpdateRequest) protocol.RepoUpdate // time out) call to clone a repository. // Asynchronous errors will have to be checked in the gitserver_repos table under last_error. func (s *Server) handleRepoClone(w http.ResponseWriter, r *http.Request) { - logger := s.Logger.Scoped("handleRepoClone", "asynchronous http handler for repo clones") + logger := s.Logger.Scoped("handleRepoClone") var req protocol.RepoCloneRequest if err := json.NewDecoder(r.Body).Decode(&req); err != nil { http.Error(w, err.Error(), http.StatusBadRequest) @@ -665,7 +665,7 @@ func (s *Server) handleRepoClone(w http.ResponseWriter, r *http.Request) { func (s *Server) handleArchive(w http.ResponseWriter, r *http.Request) { var ( - logger = s.Logger.Scoped("handleArchive", "http handler for repo archive") + logger = s.Logger.Scoped("handleArchive") q = r.URL.Query() treeish = q.Get("treeish") repo = q.Get("repo") @@ -722,7 +722,7 @@ func (s *Server) handleArchive(w http.ResponseWriter, r *http.Request) { } func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) { - logger := s.Logger.Scoped("handleSearch", "http handler for search") + logger := s.Logger.Scoped("handleSearch") tr, ctx := trace.New(r.Context(), "handleSearch") defer tr.End() @@ -1182,7 +1182,7 @@ func (s *Server) exec(ctx context.Context, logger log.Logger, req *protocol.Exec // execHTTP translates the results of an exec into the expected HTTP statuses and payloads func (s *Server) execHTTP(w http.ResponseWriter, r *http.Request, req *protocol.ExecRequest) { - logger := s.Logger.Scoped("exec", "").With(log.Strings("req.Args", req.Args)) + logger := s.Logger.Scoped("exec").With(log.Strings("req.Args", req.Args)) // Flush writes more aggressively than standard net/http so that clients // with a context deadline see as much partial response body as possible. 
@@ -1424,7 +1424,7 @@ func (s *Server) doClone( remoteURL *vcs.URL, opts CloneOptions, ) (err error) { - logger := s.Logger.Scoped("doClone", "").With(log.String("repo", string(repo))) + logger := s.Logger.Scoped("doClone").With(log.String("repo", string(repo))) defer lock.Release() defer func() { @@ -1779,7 +1779,7 @@ func (s *Server) doRepoUpdate(ctx context.Context, repo api.RepoName, revspec st var doBackgroundRepoUpdateMock func(api.RepoName) error func (s *Server) doBackgroundRepoUpdate(repo api.RepoName, revspec string) error { - logger := s.Logger.Scoped("backgroundRepoUpdate", "").With(log.String("repo", string(repo))) + logger := s.Logger.Scoped("backgroundRepoUpdate").With(log.String("repo", string(repo))) if doBackgroundRepoUpdateMock != nil { return doBackgroundRepoUpdateMock(repo) diff --git a/cmd/gitserver/internal/servermetrics.go b/cmd/gitserver/internal/servermetrics.go index 98d2240ff32..e488ca26140 100644 --- a/cmd/gitserver/internal/servermetrics.go +++ b/cmd/gitserver/internal/servermetrics.go @@ -80,7 +80,7 @@ func registerEchoMetric(logger log.Logger) { }) prometheus.MustRegister(echoDuration) go func() { - logger = logger.Scoped("echoMetricReporter", "") + logger = logger.Scoped("echoMetricReporter") for { time.Sleep(10 * time.Second) s := time.Now() diff --git a/cmd/gitserver/internal/sshagent/ssh_agent.go b/cmd/gitserver/internal/sshagent/ssh_agent.go index bc9d4ccdeb4..88606fd3b23 100644 --- a/cmd/gitserver/internal/sshagent/ssh_agent.go +++ b/cmd/gitserver/internal/sshagent/ssh_agent.go @@ -57,7 +57,7 @@ func New(logger log.Logger, raw, passphrase []byte) (*sshAgent, error) { // Set up the type we're going to return. a := &sshAgent{ - logger: logger.Scoped("sshAgent", "speaks the ssh-agent protocol and can be used by gitserver"), + logger: logger.Scoped("sshAgent"), l: l, sock: socketName, keyring: keyring, diff --git a/cmd/gitserver/internal/vcssyncer/customfetch.go b/cmd/gitserver/internal/vcssyncer/customfetch.go index 9d1f5f89d7e..f6a5f48e0c9 100644 --- a/cmd/gitserver/internal/vcssyncer/customfetch.go +++ b/cmd/gitserver/internal/vcssyncer/customfetch.go @@ -24,7 +24,7 @@ var enableCustomGitFetch = env.Get("ENABLE_CUSTOM_GIT_FETCH", "false", "Enable c func buildCustomFetchMappings(c []*schema.CustomGitFetchMapping) map[string][]string { // this is an edge case where a CustomGitFetchMapping has been made but enableCustomGitFetch is false if c != nil && enableCustomGitFetch == "false" { - logger := log.Scoped("customfetch", "") + logger := log.Scoped("customfetch") logger.Warn("a CustomGitFetchMapping is configured but ENABLE_CUSTOM_GIT_FETCH is not set") return map[string][]string{} diff --git a/cmd/gitserver/internal/vcssyncer/go_modules.go b/cmd/gitserver/internal/vcssyncer/go_modules.go index a5abb5f922e..7fd651adf31 100644 --- a/cmd/gitserver/internal/vcssyncer/go_modules.go +++ b/cmd/gitserver/internal/vcssyncer/go_modules.go @@ -33,7 +33,7 @@ func NewGoModulesSyncer( } return &vcsPackagesSyncer{ - logger: log.Scoped("GoModulesSyncer", "sync Go modules"), + logger: log.Scoped("GoModulesSyncer"), typ: "go_modules", scheme: dependencies.GoPackagesScheme, placeholder: placeholder, diff --git a/cmd/gitserver/internal/vcssyncer/jvm_packages.go b/cmd/gitserver/internal/vcssyncer/jvm_packages.go index bbde2de9e34..aa78ef01635 100644 --- a/cmd/gitserver/internal/vcssyncer/jvm_packages.go +++ b/cmd/gitserver/internal/vcssyncer/jvm_packages.go @@ -42,10 +42,10 @@ func NewJVMPackagesSyncer(connection *schema.JVMPackagesConnection, svc *depende 
panic(fmt.Sprintf("expected placeholder package to parse but got %v", err)) } - chandle := coursier.NewCoursierHandle(observation.NewContext(log.Scoped("gitserver.jvmsyncer", "")), cacheDir) + chandle := coursier.NewCoursierHandle(observation.NewContext(log.Scoped("gitserver.jvmsyncer")), cacheDir) return &vcsPackagesSyncer{ - logger: log.Scoped("JVMPackagesSyncer", "sync JVM packages"), + logger: log.Scoped("JVMPackagesSyncer"), typ: "jvm_packages", scheme: dependencies.JVMPackagesScheme, placeholder: placeholder, @@ -124,7 +124,7 @@ func (s *jvmPackagesSyncer) Download(ctx context.Context, dir string, dep reposo } func unzipJarFile(jarPath, destination string) (err error) { - logger := log.Scoped("unzipJarFile", "unzipJarFile unpacks the given jvm archive into workDir") + logger := log.Scoped("unzipJarFile") workDir := strings.TrimSuffix(destination, string(os.PathSeparator)) + string(os.PathSeparator) zipFile, err := os.ReadFile(jarPath) diff --git a/cmd/gitserver/internal/vcssyncer/npm_packages.go b/cmd/gitserver/internal/vcssyncer/npm_packages.go index 63ab1f516d8..6451f04ae19 100644 --- a/cmd/gitserver/internal/vcssyncer/npm_packages.go +++ b/cmd/gitserver/internal/vcssyncer/npm_packages.go @@ -33,7 +33,7 @@ func NewNpmPackagesSyncer( } return &vcsPackagesSyncer{ - logger: log.Scoped("NPMPackagesSyncer", "sync NPM packages"), + logger: log.Scoped("NPMPackagesSyncer"), typ: "npm_packages", scheme: dependencies.NpmPackagesScheme, placeholder: placeholder, @@ -125,7 +125,7 @@ func (s *npmPackagesSyncer) Download(ctx context.Context, dir string, dep reposo // Additionally, if all the files in the tarball have paths of the form // dir/ for the same directory 'dir', the 'dir' will be stripped. func decompressTgz(tgz io.Reader, destination string) error { - logger := log.Scoped("decompressTgz", "Decompress a tarball at tgzPath, putting the files under destination.") + logger := log.Scoped("decompressTgz") err := unpack.Tgz(tgz, destination, unpack.Opts{ SkipInvalid: true, diff --git a/cmd/gitserver/internal/vcssyncer/python_packages.go b/cmd/gitserver/internal/vcssyncer/python_packages.go index df3d53e53b9..0785678026e 100644 --- a/cmd/gitserver/internal/vcssyncer/python_packages.go +++ b/cmd/gitserver/internal/vcssyncer/python_packages.go @@ -28,7 +28,7 @@ func NewPythonPackagesSyncer( reposDir string, ) VCSSyncer { return &vcsPackagesSyncer{ - logger: log.Scoped("PythonPackagesSyncer", "sync Python packages"), + logger: log.Scoped("PythonPackagesSyncer"), typ: "python_packages", scheme: dependencies.PythonPackagesScheme, placeholder: reposource.ParseVersionedPackage("sourcegraph.com/placeholder@v0.0.0"), @@ -84,7 +84,7 @@ func (s *pythonPackagesSyncer) Download(ctx context.Context, dir string, dep rep // files that aren't valid or that are potentially malicious. It detects the kind of archive // and compression used with the given packageURL. 
func unpackPythonPackage(pkg io.Reader, packageURL, reposDir, workDir string) error { - logger := log.Scoped("unpackPythonPackage", "unpackPythonPackage unpacks the given python package archive into workDir") + logger := log.Scoped("unpackPythonPackage") u, err := url.Parse(packageURL) if err != nil { return errors.Wrap(err, "bad python package URL") diff --git a/cmd/gitserver/internal/vcssyncer/ruby_packages.go b/cmd/gitserver/internal/vcssyncer/ruby_packages.go index fb4db3da533..4db4a0c5877 100644 --- a/cmd/gitserver/internal/vcssyncer/ruby_packages.go +++ b/cmd/gitserver/internal/vcssyncer/ruby_packages.go @@ -26,7 +26,7 @@ func NewRubyPackagesSyncer( client *rubygems.Client, ) VCSSyncer { return &vcsPackagesSyncer{ - logger: log.Scoped("RubyPackagesSyncer", "sync Ruby packages"), + logger: log.Scoped("RubyPackagesSyncer"), typ: "ruby_packages", scheme: dependencies.RubyPackagesScheme, placeholder: reposource.NewRubyVersionedPackage("sourcegraph/placeholder", "0.0.0"), diff --git a/cmd/gitserver/internal/vcssyncer/rust_packages.go b/cmd/gitserver/internal/vcssyncer/rust_packages.go index 7d2d3e780cd..eb0ebb765d7 100644 --- a/cmd/gitserver/internal/vcssyncer/rust_packages.go +++ b/cmd/gitserver/internal/vcssyncer/rust_packages.go @@ -23,7 +23,7 @@ func NewRustPackagesSyncer( client *crates.Client, ) VCSSyncer { return &vcsPackagesSyncer{ - logger: log.Scoped("RustPackagesSyncer", "sync Rust packages"), + logger: log.Scoped("RustPackagesSyncer"), typ: "rust_packages", scheme: dependencies.RustPackagesScheme, placeholder: reposource.ParseRustVersionedPackage("sourcegraph.com/placeholder@0.0.0"), diff --git a/cmd/gitserver/shared/shared.go b/cmd/gitserver/shared/shared.go index 42896c49fd4..f1ba88c7733 100644 --- a/cmd/gitserver/shared/shared.go +++ b/cmd/gitserver/shared/shared.go @@ -216,10 +216,10 @@ func makeGRPCServer(logger log.Logger, s *server.Server) *grpc.Server { var additionalServerOptions []grpc.ServerOption for method, scopedLogger := range map[string]log.Logger{ - proto.GitserverService_Exec_FullMethodName: logger.Scoped("exec.accesslog", "exec endpoint access log"), - proto.GitserverService_Archive_FullMethodName: logger.Scoped("archive.accesslog", "archive endpoint access log"), - proto.GitserverService_P4Exec_FullMethodName: logger.Scoped("p4exec.accesslog", "p4-exec endpoint access log"), - proto.GitserverService_GetObject_FullMethodName: logger.Scoped("get-object.accesslog", "get-object endpoint access log"), + proto.GitserverService_Exec_FullMethodName: logger.Scoped("exec.accesslog"), + proto.GitserverService_Archive_FullMethodName: logger.Scoped("archive.accesslog"), + proto.GitserverService_P4Exec_FullMethodName: logger.Scoped("p4exec.accesslog"), + proto.GitserverService_GetObject_FullMethodName: logger.Scoped("get-object.accesslog"), } { streamInterceptor := accesslog.StreamServerInterceptor(scopedLogger, configurationWatcher) unaryInterceptor := accesslog.UnaryServerInterceptor(scopedLogger, configurationWatcher) @@ -278,7 +278,7 @@ func getRemoteURLFunc( return "", err } - return cloneurl.ForEncryptableConfig(ctx, logger.Scoped("repos.CloneURL", ""), db, svc.Kind, svc.Config, r) + return cloneurl.ForEncryptableConfig(ctx, logger.Scoped("repos.CloneURL"), db, svc.Kind, svc.Config, r) } return "", errors.Errorf("no sources for %q", repo) } diff --git a/cmd/loadtest/main.go b/cmd/loadtest/main.go index cbb99f95c3f..10de2fafe74 100644 --- a/cmd/loadtest/main.go +++ b/cmd/loadtest/main.go @@ -30,7 +30,7 @@ func main() { sanitycheck.Pass() log.Init(log.Resource{Name: 
"loadtest"}) - logger := log.Scoped("loadtest", "") + logger := log.Scoped("loadtest") if err := run(logger); err != nil { logger.Fatal("run failed", log.Error(err)) diff --git a/cmd/migrator/main.go b/cmd/migrator/main.go index 226b644877a..9cbaeec0af7 100644 --- a/cmd/migrator/main.go +++ b/cmd/migrator/main.go @@ -18,7 +18,7 @@ func main() { }) defer liblog.Sync() - logger := log.Scoped("migrator", "") + logger := log.Scoped("migrator") if err := shared.Start(logger, register.RegisterEnterpriseMigratorsUsingConfAndStoreFactory); err != nil { logger.Fatal(err.Error()) diff --git a/cmd/pings/shared/metrics.go b/cmd/pings/shared/metrics.go index b452bbbf83f..ea6528bd948 100644 --- a/cmd/pings/shared/metrics.go +++ b/cmd/pings/shared/metrics.go @@ -25,7 +25,7 @@ func initOpenTelemetry(ctx context.Context, logger log.Logger, config OpenTeleme } shutdownMetrics, err := maybeEnableMetrics(ctx, - logger.Scoped("metrics", "OpenTelemetry metrics"), + logger.Scoped("metrics"), config, res) if err != nil { return nil, errors.Wrap(err, "maybeEnableMetrics") diff --git a/cmd/precise-code-intel-worker/shared/shared.go b/cmd/precise-code-intel-worker/shared/shared.go index 3f5a04907a9..dc137164e7c 100644 --- a/cmd/precise-code-intel-worker/shared/shared.go +++ b/cmd/precise-code-intel-worker/shared/shared.go @@ -105,7 +105,7 @@ func mustInitializeDB(observationCtx *observation.Context) *sql.DB { }) sqlDB, err := connections.EnsureNewFrontendDB(observationCtx, dsn, "precise-code-intel-worker") if err != nil { - log.Scoped("init db", "Initialize fontend database").Fatal("Failed to connect to frontend database", log.Error(err)) + log.Scoped("init db").Fatal("Failed to connect to frontend database", log.Error(err)) } // @@ -132,7 +132,7 @@ func mustInitializeCodeIntelDB(observationCtx *observation.Context) codeintelsha }) db, err := connections.EnsureNewCodeIntelDB(observationCtx, dsn, "precise-code-intel-worker") if err != nil { - log.Scoped("init db", "Initialize codeintel database.").Fatal("Failed to connect to codeintel database", log.Error(err)) + log.Scoped("init db").Fatal("Failed to connect to codeintel database", log.Error(err)) } return codeintelshared.NewCodeIntelDB(observationCtx.Logger, db) diff --git a/cmd/repo-updater/internal/authz/perms_syncer.go b/cmd/repo-updater/internal/authz/perms_syncer.go index 75433ae62c6..255a770c920 100644 --- a/cmd/repo-updater/internal/authz/perms_syncer.go +++ b/cmd/repo-updater/internal/authz/perms_syncer.go @@ -106,7 +106,7 @@ func (s *PermsSyncer) syncRepoPerms(ctx context.Context, repoID api.RepoID, noPe } } - logger := s.logger.Scoped("syncRepoPerms", "processes permissions syncing request in a repo-centric way").With( + logger := s.logger.Scoped("syncRepoPerms").With( log.Object("repo", log.Int32("ID", int32(repo.ID)), log.String("name", string(repo.Name)), @@ -265,7 +265,7 @@ func (s *PermsSyncer) syncUserPerms(ctx context.Context, userID int32, noPerms b return nil, nil, errors.Wrap(err, "get user") } - logger := s.logger.Scoped("syncUserPerms", "processes permissions sync request in user-centric way").With( + logger := s.logger.Scoped("syncUserPerms").With( log.Object("user", log.Int32("ID", userID), log.String("name", user.Username)), @@ -432,7 +432,7 @@ func (s *PermsSyncer) fetchUserPermsViaExternalAccounts(ctx context.Context, use byServiceID := s.providersByServiceID() accounts := s.db.UserExternalAccounts() - logger := s.logger.Scoped("fetchUserPermsViaExternalAccounts", "sync permissions using external accounts (logging 
connections)").With(log.Int32("userID", user.ID)) + logger := s.logger.Scoped("fetchUserPermsViaExternalAccounts").With(log.Int32("userID", user.ID)) // Check if the user has an external account for every authz provider respectively, // and try to fetch the account when not. @@ -686,7 +686,7 @@ func (s *PermsSyncer) listPrivateRepoNamesBySpecs(ctx context.Context, repoSpecs } func (s *PermsSyncer) saveUserPermsForAccount(ctx context.Context, userID int32, acctID int32, repoIDs []int32) (*database.SetPermissionsResult, error) { - logger := s.logger.Scoped("saveUserPermsForAccount", "saves permissions per external account").With( + logger := s.logger.Scoped("saveUserPermsForAccount").With( log.Object("user", log.Int32("ID", userID), log.Int32("ExternalAccountID", acctID)), diff --git a/cmd/repo-updater/internal/authz/perms_syncer_worker.go b/cmd/repo-updater/internal/authz/perms_syncer_worker.go index 4274f374af6..58d4a72e2a7 100644 --- a/cmd/repo-updater/internal/authz/perms_syncer_worker.go +++ b/cmd/repo-updater/internal/authz/perms_syncer_worker.go @@ -27,9 +27,9 @@ const ( ) func MakePermsSyncerWorker(observationCtx *observation.Context, syncer permsSyncer, syncType syncType, jobsStore database.PermissionSyncJobStore) *permsSyncerWorker { - logger := observationCtx.Logger.Scoped("RepoPermsSyncerWorkerRepo", "Repository permissions sync worker") + logger := observationCtx.Logger.Scoped("RepoPermsSyncerWorkerRepo") if syncType == SyncTypeUser { - logger = observationCtx.Logger.Scoped("UserPermsSyncerWorker", "User permissions sync worker") + logger = observationCtx.Logger.Scoped("UserPermsSyncerWorker") } return &permsSyncerWorker{ logger: logger, diff --git a/cmd/repo-updater/shared/main.go b/cmd/repo-updater/shared/main.go index f95a200177c..5cea4cee89d 100644 --- a/cmd/repo-updater/shared/main.go +++ b/cmd/repo-updater/shared/main.go @@ -93,16 +93,16 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic // bit more to do in this method, though, and the process will be marked ready // further down this function. - repos.MustRegisterMetrics(log.Scoped("MustRegisterMetrics", ""), db, envvar.SourcegraphDotComMode()) + repos.MustRegisterMetrics(log.Scoped("MustRegisterMetrics"), db, envvar.SourcegraphDotComMode()) - store := repos.NewStore(logger.Scoped("store", "repo store"), db) + store := repos.NewStore(logger.Scoped("store"), db) { m := repos.NewStoreMetrics() m.MustRegister(prometheus.DefaultRegisterer) store.SetMetrics(m) } - sourcerLogger := logger.Scoped("repos.Sourcer", "repositories source") + sourcerLogger := logger.Scoped("repos.Sourcer") cf := httpcli.NewExternalClientFactory( httpcli.NewLoggingMiddleware(sourcerLogger), ) @@ -133,7 +133,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic go watchSyncer(ctx, logger, syncer, updateScheduler, server.ChangesetSyncRegistry) permsSyncer := authz.NewPermsSyncer( - observationCtx.Logger.Scoped("PermsSyncer", "repository and user permissions syncer"), + observationCtx.Logger.Scoped("PermsSyncer"), db, store, database.Perms(observationCtx.Logger, db, timeutil.Now), @@ -152,7 +152,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic // separate name for logging and metrics. 
authz.MakeResetter(observationCtx, repoWorkerStore), newUnclonedReposManager(ctx, logger, envvar.SourcegraphDotComMode(), updateScheduler, store), - repos.NewPhabricatorRepositorySyncWorker(ctx, db, log.Scoped("PhabricatorRepositorySyncWorker", ""), store), + repos.NewPhabricatorRepositorySyncWorker(ctx, db, log.Scoped("PhabricatorRepositorySyncWorker"), store), // Run git fetches scheduler updateScheduler, } @@ -176,7 +176,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic if disabled, _ := strconv.ParseBool(os.Getenv("DISABLE_REPO_PURGE")); disabled { logger.Info("repository purger is disabled via env DISABLE_REPO_PURGE") } else { - routines = append(routines, repos.NewRepositoryPurgeWorker(ctx, log.Scoped("repoPurgeWorker", "remove deleted repositories"), db, conf.DefaultClient())) + routines = append(routines, repos.NewRepositoryPurgeWorker(ctx, log.Scoped("repoPurgeWorker"), db, conf.DefaultClient())) } // Register recorder in all routines that support it. @@ -316,7 +316,7 @@ func manualPurgeHandler(db database.DB) http.HandlerFunc { return } } - err = repos.PurgeOldestRepos(log.Scoped("PurgeOldestRepos", ""), db, limit, perSecond) + err = repos.PurgeOldestRepos(log.Scoped("PurgeOldestRepos"), db, limit, perSecond) if err != nil { http.Error(w, fmt.Sprintf("starting manual purge: %v", err), http.StatusInternalServerError) return diff --git a/cmd/searcher/internal/search/mmap.go b/cmd/searcher/internal/search/mmap.go index 9f7bc50a8ed..e6cc7fe229e 100644 --- a/cmd/searcher/internal/search/mmap.go +++ b/cmd/searcher/internal/search/mmap.go @@ -20,7 +20,7 @@ func mmap(path string, f *os.File, fi fs.FileInfo) ([]byte, error) { } if err := unix.Madvise(data, syscall.MADV_SEQUENTIAL); err != nil { // best effort at optimization, so only log failures here - log.Scoped("mmap", "").Info("failed to madvise", log.String("path", path), log.Error(err)) + log.Scoped("mmap").Info("failed to madvise", log.String("path", path), log.Error(err)) } return data, nil diff --git a/cmd/searcher/internal/search/search.go b/cmd/searcher/internal/search/search.go index eb761831418..5ba4221e665 100644 --- a/cmd/searcher/internal/search/search.go +++ b/cmd/searcher/internal/search/search.go @@ -231,7 +231,7 @@ func (s *Service) search(ctx context.Context, p *protocol.Request, sender matchS // structural search. 
hybrid := !p.IsStructuralPat if hybrid { - logger := logWithTrace(ctx, s.Log).Scoped("hybrid", "hybrid indexed and unindexed search").With( + logger := logWithTrace(ctx, s.Log).Scoped("hybrid").With( log.String("repo", string(p.Repo)), log.String("commit", string(p.Commit)), ) diff --git a/cmd/searcher/shared/shared.go b/cmd/searcher/shared/shared.go index a4184e3748f..ba05e0d812f 100644 --- a/cmd/searcher/shared/shared.go +++ b/cmd/searcher/shared/shared.go @@ -124,7 +124,7 @@ func Start(ctx context.Context, observationCtx *observation.Context, ready servi } // Explicitly don't scope Store logger under the parent logger - storeObservationCtx := observation.NewContext(log.Scoped("Store", "searcher archives store")) + storeObservationCtx := observation.NewContext(log.Scoped("Store")) git := gitserver.NewClient() diff --git a/cmd/server/shared/observability.go b/cmd/server/shared/observability.go index 2e6b4182159..0212d751412 100644 --- a/cmd/server/shared/observability.go +++ b/cmd/server/shared/observability.go @@ -12,7 +12,7 @@ const grafanaProcLine = `grafana: /usr/share/grafana/bin/grafana-server -config func maybeObservability() []string { if os.Getenv("DISABLE_OBSERVABILITY") != "" { - log.Scoped("server.observability", "").Info("WARNING: Running with observability disabled") + log.Scoped("server.observability").Info("WARNING: Running with observability disabled") return []string{""} } diff --git a/cmd/server/shared/shared.go b/cmd/server/shared/shared.go index 0555cd0dc6d..4c14708cb0d 100644 --- a/cmd/server/shared/shared.go +++ b/cmd/server/shared/shared.go @@ -84,7 +84,7 @@ func Main() { }) defer liblog.Sync() - logger := sglog.Scoped("server", "Sourcegraph server") + logger := sglog.Scoped("server") // Ensure CONFIG_DIR and DATA_DIR diff --git a/cmd/symbols/internal/api/handler.go b/cmd/symbols/internal/api/handler.go index 00a9a941c1e..21913d75f4a 100644 --- a/cmd/symbols/internal/api/handler.go +++ b/cmd/symbols/internal/api/handler.go @@ -90,7 +90,7 @@ func NewHandler( return searchFunc(ctx, args) } - rootLogger := logger.Scoped("symbolsServer", "symbols RPC server") + rootLogger := logger.Scoped("symbolsServer") // Initialize the gRPC server grpcServer := defaults.NewServer(rootLogger) @@ -98,10 +98,10 @@ func NewHandler( searchFunc: searchFuncWrapper, readFileFunc: readFileFunc, ctagsBinary: ctagsBinary, - logger: rootLogger.Scoped("grpc", "grpc server implementation"), + logger: rootLogger.Scoped("grpc"), }) - jsonLogger := rootLogger.Scoped("jsonrpc", "json server implementation") + jsonLogger := rootLogger.Scoped("jsonrpc") // Initialize the legacy JSON API server mux := http.NewServeMux() diff --git a/cmd/symbols/parser/parser.go b/cmd/symbols/parser/parser.go index cc1b021bed0..7abad359492 100644 --- a/cmd/symbols/parser/parser.go +++ b/cmd/symbols/parser/parser.go @@ -272,7 +272,7 @@ func shouldPersistEntry(e *ctags.Entry) bool { } func SpawnCtags(logger log.Logger, ctagsConfig types.CtagsConfig, source ctags_config.ParserType) (ctags.Parser, error) { - logger = logger.Scoped("ctags", "ctags processes") + logger = logger.Scoped("ctags") var options ctags.Options if source == ctags_config.UniversalCtags { diff --git a/cmd/symbols/shared/setup.go b/cmd/symbols/shared/setup.go index 801a8f0ad90..c3cb6793edf 100644 --- a/cmd/symbols/shared/setup.go +++ b/cmd/symbols/shared/setup.go @@ -123,11 +123,11 @@ func loadRockskipConfig(baseConfig env.BaseConfig, ctags types.CtagsConfig, repo } func setupRockskip(observationCtx *observation.Context, config rockskipConfig, 
gitserverClient symbolsGitserver.GitserverClient, repositoryFetcher fetcher.RepositoryFetcher) (types.SearchFunc, func(http.ResponseWriter, *http.Request), string, error) { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("rockskip", "rockskip-based symbols"), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("rockskip"), observationCtx) codeintelDB := mustInitializeCodeIntelDB(observationCtx) createParser := func() (ctags.Parser, error) { - return symbolsParser.SpawnCtags(log.Scoped("parser", "ctags parser"), config.Ctags, ctags_config.UniversalCtags) + return symbolsParser.SpawnCtags(log.Scoped("parser"), config.Ctags, ctags_config.UniversalCtags) } server, err := rockskip.NewService(codeintelDB, gitserverClient, repositoryFetcher, createParser, config.MaxConcurrentlyIndexing, config.MaxRepos, config.LogQueries, config.IndexRequestsQueueSize, config.SymbolsCacheSize, config.PathSymbolsCacheSize, config.SearchLastIndexedCommit) if err != nil { diff --git a/cmd/symbols/shared/sqlite.go b/cmd/symbols/shared/sqlite.go index d21e4ccdcbf..b8b205a6944 100644 --- a/cmd/symbols/shared/sqlite.go +++ b/cmd/symbols/shared/sqlite.go @@ -37,7 +37,7 @@ func LoadConfig() { var config types.SqliteConfig func SetupSqlite(observationCtx *observation.Context, db database.DB, gitserverClient gitserver.GitserverClient, repositoryFetcher fetcher.RepositoryFetcher) (types.SearchFunc, func(http.ResponseWriter, *http.Request), []goroutine.BackgroundRoutine, string, error) { - logger := observationCtx.Logger.Scoped("sqlite.setup", "SQLite setup") + logger := observationCtx.Logger.Scoped("sqlite.setup") if err := baseConfig.Validate(); err != nil { logger.Fatal("failed to load configuration", log.Error(err)) diff --git a/cmd/telemetry-gateway/internal/diagnosticsserver/diagnosticsserver.go b/cmd/telemetry-gateway/internal/diagnosticsserver/diagnosticsserver.go index 9e36ba3cbf6..509b9447caa 100644 --- a/cmd/telemetry-gateway/internal/diagnosticsserver/diagnosticsserver.go +++ b/cmd/telemetry-gateway/internal/diagnosticsserver/diagnosticsserver.go @@ -21,7 +21,7 @@ func NewDiagnosticsHandler( secret string, healthCheck func(context.Context) error, ) http.Handler { - baseLogger = baseLogger.Scoped("diagnostics", "healthz checks") + baseLogger = baseLogger.Scoped("diagnostics") hasValidSecret := func(w http.ResponseWriter, r *http.Request) (yes bool) { token, err := authbearer.ExtractBearer(r.Header) diff --git a/cmd/telemetry-gateway/internal/server/server.go b/cmd/telemetry-gateway/internal/server/server.go index f7076460ca3..9bda5b4c8d6 100644 --- a/cmd/telemetry-gateway/internal/server/server.go +++ b/cmd/telemetry-gateway/internal/server/server.go @@ -39,7 +39,7 @@ func New(logger log.Logger, eventsTopic pubsub.TopicClient) (*Server, error) { } return &Server{ - logger: logger.Scoped("server", "grpc server"), + logger: logger.Scoped("server"), eventsTopic: eventsTopic, recordEventsMetrics: m, diff --git a/cmd/telemetry-gateway/shared/main.go b/cmd/telemetry-gateway/shared/main.go index 3c2e3db13b6..3824ddf22d4 100644 --- a/cmd/telemetry-gateway/shared/main.go +++ b/cmd/telemetry-gateway/shared/main.go @@ -96,14 +96,14 @@ func initOpenTelemetry(ctx context.Context, logger log.Logger, config OpenTeleme // Enable tracing, at this point tracing wouldn't have been enabled yet because // we run without conf which means Sourcegraph tracing is not enabled. 
shutdownTracing, err := maybeEnableTracing(ctx, - logger.Scoped("tracing", "OpenTelemetry tracing"), + logger.Scoped("tracing"), config, res) if err != nil { return nil, errors.Wrap(err, "maybeEnableTracing") } shutdownMetrics, err := maybeEnableMetrics(ctx, - logger.Scoped("metrics", "OpenTelemetry metrics"), + logger.Scoped("metrics"), config, res) if err != nil { return nil, errors.Wrap(err, "maybeEnableMetrics") diff --git a/cmd/worker/internal/batches/bulk_operation_processor_job.go b/cmd/worker/internal/batches/bulk_operation_processor_job.go index 91c06f8a5a1..7a7b66d433f 100644 --- a/cmd/worker/internal/batches/bulk_operation_processor_job.go +++ b/cmd/worker/internal/batches/bulk_operation_processor_job.go @@ -28,7 +28,7 @@ func (j *bulkOperationProcessorJob) Config() []env.Config { } func (j *bulkOperationProcessorJob) Routines(_ context.Context, observationCtx *observation.Context) ([]goroutine.BackgroundRoutine, error) { - observationCtx = observation.NewContext(observationCtx.Logger.Scoped("routines", "bulk operation processor job routines")) + observationCtx = observation.NewContext(observationCtx.Logger.Scoped("routines")) workCtx := actor.WithInternalActor(context.Background()) bstore, err := InitStore() @@ -47,7 +47,7 @@ func (j *bulkOperationProcessorJob) Routines(_ context.Context, observationCtx * bstore, resStore, sources.NewSourcer(httpcli.NewExternalClientFactory( - httpcli.NewLoggingMiddleware(observationCtx.Logger.Scoped("sourcer", "batches sourcer")), + httpcli.NewLoggingMiddleware(observationCtx.Logger.Scoped("sourcer")), )), ) diff --git a/cmd/worker/internal/batches/dbstore.go b/cmd/worker/internal/batches/dbstore.go index abf9f62b7d7..5a9217f4847 100644 --- a/cmd/worker/internal/batches/dbstore.go +++ b/cmd/worker/internal/batches/dbstore.go @@ -18,7 +18,7 @@ func InitStore() (*store.Store, error) { } var initStore = memo.NewMemoizedConstructor(func() (*store.Store, error) { - observationCtx := observation.NewContext(log.Scoped("store.batches", "batches store")) + observationCtx := observation.NewContext(log.Scoped("store.batches")) db, err := workerdb.InitDB(observationCtx) if err != nil { @@ -34,7 +34,7 @@ func InitReconcilerWorkerStore() (dbworkerstore.Store[*types.Changeset], error) } var initReconcilerWorkerStore = memo.NewMemoizedConstructor(func() (dbworkerstore.Store[*types.Changeset], error) { - observationCtx := observation.NewContext(log.Scoped("store.reconciler", "reconciler worker store")) + observationCtx := observation.NewContext(log.Scoped("store.reconciler")) db, err := workerdb.InitDB(observationCtx) if err != nil { @@ -50,7 +50,7 @@ func InitBulkOperationWorkerStore() (dbworkerstore.Store[*types.ChangesetJob], e } var initBulkOperationWorkerStore = memo.NewMemoizedConstructor(func() (dbworkerstore.Store[*types.ChangesetJob], error) { - observationCtx := observation.NewContext(log.Scoped("store.bulk_ops", "bulk operation worker store")) + observationCtx := observation.NewContext(log.Scoped("store.bulk_ops")) db, err := workerdb.InitDB(observationCtx) if err != nil { @@ -66,7 +66,7 @@ func InitBatchSpecWorkspaceExecutionWorkerStore() (dbworkerstore.Store[*types.Ba } var initBatchSpecWorkspaceExecutionWorkerStore = memo.NewMemoizedConstructor(func() (dbworkerstore.Store[*types.BatchSpecWorkspaceExecutionJob], error) { - observationCtx := observation.NewContext(log.Scoped("store.execution", "the batch spec workspace execution worker store")) + observationCtx := observation.NewContext(log.Scoped("store.execution")) db, err := 
workerdb.InitDB(observationCtx) if err != nil { @@ -82,7 +82,7 @@ func InitBatchSpecResolutionWorkerStore() (dbworkerstore.Store[*types.BatchSpecR } var initBatchSpecResolutionWorkerStore = memo.NewMemoizedConstructor(func() (dbworkerstore.Store[*types.BatchSpecResolutionJob], error) { - observationCtx := observation.NewContext(log.Scoped("store.batch_spec_resolution", "the batch spec resolution worker store")) + observationCtx := observation.NewContext(log.Scoped("store.batch_spec_resolution")) db, err := workerdb.InitDB(observationCtx) if err != nil { diff --git a/cmd/worker/internal/batches/janitor_job.go b/cmd/worker/internal/batches/janitor_job.go index a36566a9d0e..14afd376c40 100644 --- a/cmd/worker/internal/batches/janitor_job.go +++ b/cmd/worker/internal/batches/janitor_job.go @@ -27,7 +27,7 @@ func (j *janitorJob) Config() []env.Config { } func (j *janitorJob) Routines(_ context.Context, observationCtx *observation.Context) ([]goroutine.BackgroundRoutine, error) { - observationCtx = observation.NewContext(observationCtx.Logger.Scoped("routines", "janitor job routines")) + observationCtx = observation.NewContext(observationCtx.Logger.Scoped("routines")) workCtx := actor.WithInternalActor(context.Background()) bstore, err := InitStore() @@ -63,22 +63,22 @@ func (j *janitorJob) Routines(_ context.Context, observationCtx *observation.Con executorMetricsReporter, janitor.NewReconcilerWorkerResetter( - observationCtx.Logger.Scoped("ReconcilerWorkerResetter", ""), + observationCtx.Logger.Scoped("ReconcilerWorkerResetter"), reconcilerStore, janitorMetrics, ), janitor.NewBulkOperationWorkerResetter( - observationCtx.Logger.Scoped("BulkOperationWorkerResetter", ""), + observationCtx.Logger.Scoped("BulkOperationWorkerResetter"), bulkOperationStore, janitorMetrics, ), janitor.NewBatchSpecWorkspaceExecutionWorkerResetter( - observationCtx.Logger.Scoped("BatchSpecWorkspaceExecutionWorkerResetter", ""), + observationCtx.Logger.Scoped("BatchSpecWorkspaceExecutionWorkerResetter"), workspaceExecutionStore, janitorMetrics, ), janitor.NewBatchSpecWorkspaceResolutionWorkerResetter( - observationCtx.Logger.Scoped("BatchSpecWorkspaceResolutionWorkerResetter", ""), + observationCtx.Logger.Scoped("BatchSpecWorkspaceResolutionWorkerResetter"), workspaceResolutionStore, janitorMetrics, ), diff --git a/cmd/worker/internal/batches/reconciler_job.go b/cmd/worker/internal/batches/reconciler_job.go index a6b02c41a29..73c90f5f79a 100644 --- a/cmd/worker/internal/batches/reconciler_job.go +++ b/cmd/worker/internal/batches/reconciler_job.go @@ -29,7 +29,7 @@ func (j *reconcilerJob) Config() []env.Config { } func (j *reconcilerJob) Routines(_ context.Context, observationCtx *observation.Context) ([]goroutine.BackgroundRoutine, error) { - observationCtx = observation.NewContext(observationCtx.Logger.Scoped("routines", "reconciler job routines")) + observationCtx = observation.NewContext(observationCtx.Logger.Scoped("routines")) workCtx := actor.WithInternalActor(context.Background()) bstore, err := InitStore() @@ -49,7 +49,7 @@ func (j *reconcilerJob) Routines(_ context.Context, observationCtx *observation. 
reconcilerStore, gitserver.NewClient(), sources.NewSourcer(httpcli.NewExternalClientFactory( - httpcli.NewLoggingMiddleware(observationCtx.Logger.Scoped("sourcer", "batches sourcer")), + httpcli.NewLoggingMiddleware(observationCtx.Logger.Scoped("sourcer")), )), ) diff --git a/cmd/worker/internal/batches/workers/batch_spec_resolution_worker.go b/cmd/worker/internal/batches/workers/batch_spec_resolution_worker.go index 355bc3d104b..a565544f74b 100644 --- a/cmd/worker/internal/batches/workers/batch_spec_resolution_worker.go +++ b/cmd/worker/internal/batches/workers/batch_spec_resolution_worker.go @@ -24,7 +24,7 @@ func NewBatchSpecResolutionWorker( ) *workerutil.Worker[*btypes.BatchSpecResolutionJob] { e := &batchSpecWorkspaceCreator{ store: s, - logger: log.Scoped("batch-spec-workspace-creator", "The background worker running workspace resolutions for batch changes"), + logger: log.Scoped("batch-spec-workspace-creator"), } options := workerutil.WorkerOptions{ diff --git a/cmd/worker/internal/batches/workspace_resolver_job.go b/cmd/worker/internal/batches/workspace_resolver_job.go index b91a5f63eee..2f36cfd997c 100644 --- a/cmd/worker/internal/batches/workspace_resolver_job.go +++ b/cmd/worker/internal/batches/workspace_resolver_job.go @@ -26,7 +26,7 @@ func (j *workspaceResolverJob) Config() []env.Config { } func (j *workspaceResolverJob) Routines(_ context.Context, observationCtx *observation.Context) ([]goroutine.BackgroundRoutine, error) { - observationCtx = observation.NewContext(observationCtx.Logger.Scoped("routines", "workspace resolver job routines")) + observationCtx = observation.NewContext(observationCtx.Logger.Scoped("routines")) workCtx := actor.WithInternalActor(context.Background()) bstore, err := InitStore() diff --git a/cmd/worker/internal/codygateway/usageworker.go b/cmd/worker/internal/codygateway/usageworker.go index d8a11db9e4a..b570b8c72fd 100644 --- a/cmd/worker/internal/codygateway/usageworker.go +++ b/cmd/worker/internal/codygateway/usageworker.go @@ -31,7 +31,7 @@ func (j *usageJob) Config() []env.Config { } func (j *usageJob) Routines(_ context.Context, observationCtx *observation.Context) ([]goroutine.BackgroundRoutine, error) { - return []goroutine.BackgroundRoutine{&usageRoutine{logger: observationCtx.Logger.Scoped("CodyGatewayUsageWorker", "")}}, nil + return []goroutine.BackgroundRoutine{&usageRoutine{logger: observationCtx.Logger.Scoped("CodyGatewayUsageWorker")}}, nil } const ( diff --git a/cmd/worker/internal/executors/multiqueue_cache_cleaner.go b/cmd/worker/internal/executors/multiqueue_cache_cleaner.go index a571d7c5252..6d57a57a5c5 100644 --- a/cmd/worker/internal/executors/multiqueue_cache_cleaner.go +++ b/cmd/worker/internal/executors/multiqueue_cache_cleaner.go @@ -28,7 +28,7 @@ var _ goroutine.Handler = &multiqueueCacheCleaner{} // window size. A cache key is represented by a queue name; the value is a hash containing timestamps as the field key and the // job ID as the field value (which is not used for anything currently). 
func NewMultiqueueCacheCleaner(queueNames []string, cache *rcache.Cache, windowSize time.Duration, cleanupInterval time.Duration) goroutine.BackgroundRoutine { - logger := log.Scoped("multiqueue-cache-cleaner", "Periodically removes entries from the multiqueue dequeue cache that are older than the configured window size.") + logger := log.Scoped("multiqueue-cache-cleaner") observationCtx := observation.NewContext(logger) handler := &multiqueueCacheCleaner{ queueNames: queueNames, @@ -88,7 +88,7 @@ func (m *multiqueueCacheCleaner) Handle(ctx context.Context) error { var timeNow = time.Now func (m *multiqueueCacheCleaner) initMetrics(observationCtx *observation.Context, queue string, constLabels prometheus.Labels) { - logger := observationCtx.Logger.Scoped("multiqueue.cachecleaner.metrics", "") + logger := observationCtx.Logger.Scoped("multiqueue.cachecleaner.metrics") observationCtx.Registerer.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: "multiqueue_executor_dequeue_cache_size", Help: "Current size of the executor dequeue cache", diff --git a/cmd/worker/internal/githubapps/job.go b/cmd/worker/internal/githubapps/job.go index 45d75288086..a116b1acf2f 100644 --- a/cmd/worker/internal/githubapps/job.go +++ b/cmd/worker/internal/githubapps/job.go @@ -35,7 +35,7 @@ func (gh *githupAppsInstallationJob) Routines(ctx context.Context, observationCt return nil, errors.Wrap(err, "init DB") } - logger := log.Scoped("github_apps_installation", "") + logger := log.Scoped("github_apps_installation") return []goroutine.BackgroundRoutine{ goroutine.NewPeriodicGoroutine( context.Background(), diff --git a/cmd/worker/internal/licensecheck/check.go b/cmd/worker/internal/licensecheck/check.go index be3b5f2a186..7c48fe8feba 100644 --- a/cmd/worker/internal/licensecheck/check.go +++ b/cmd/worker/internal/licensecheck/check.go @@ -195,7 +195,7 @@ func StartLicenseCheck(originalCtx context.Context, logger log.Logger, db databa routine := goroutine.NewPeriodicGoroutine( ctxWithCancel, - &licenseChecker{siteID: siteID, token: licenseToken, doer: httpcli.ExternalDoer, logger: logger.Scoped("licenseChecker", "Periodically checks license validity")}, + &licenseChecker{siteID: siteID, token: licenseToken, doer: httpcli.ExternalDoer, logger: logger.Scoped("licenseChecker")}, goroutine.WithName("licensing.check-license-validity"), goroutine.WithDescription("check if license is valid from sourcegraph.com"), goroutine.WithInterval(licensing.LicenseCheckInterval), diff --git a/cmd/worker/internal/outboundwebhooks/job.go b/cmd/worker/internal/outboundwebhooks/job.go index 7c2685a231b..0a5c3af5f15 100644 --- a/cmd/worker/internal/outboundwebhooks/job.go +++ b/cmd/worker/internal/outboundwebhooks/job.go @@ -36,7 +36,7 @@ func (*sender) Config() []env.Config { } func (s *sender) Routines(_ context.Context, observationCtx *observation.Context) ([]goroutine.BackgroundRoutine, error) { - observationCtx = observation.NewContext(observationCtx.Logger.Scoped("sender", "outbound webhook sender")) + observationCtx = observation.NewContext(observationCtx.Logger.Scoped("sender")) ctx := actor.WithInternalActor(context.Background()) db, err := workerdb.InitDB(observationCtx) diff --git a/cmd/worker/internal/permissions/bitbucket_projects.go b/cmd/worker/internal/permissions/bitbucket_projects.go index 1a00021d4c1..fde222ec287 100644 --- a/cmd/worker/internal/permissions/bitbucket_projects.go +++ b/cmd/worker/internal/permissions/bitbucket_projects.go @@ -83,7 +83,7 @@ type bitbucketProjectPermissionsHandler struct { // 
Handle implements the workerutil.Handler interface. func (h *bitbucketProjectPermissionsHandler) Handle(ctx context.Context, logger log.Logger, workerJob *types.BitbucketProjectPermissionJob) (err error) { - logger = logger.Scoped("BitbucketProjectPermissionsHandler", "handles jobs to apply explicit permissions to all repositories of a Bitbucket Project") + logger = logger.Scoped("BitbucketProjectPermissionsHandler") defer func() { if err != nil { logger.Error("Handle", log.Error(err)) @@ -355,7 +355,7 @@ func (h *bitbucketProjectPermissionsHandler) repoExists(ctx context.Context, rep // newBitbucketProjectPermissionsWorker creates a worker that reads the explicit_permissions_bitbucket_projects_jobs table and // executes the jobs. func newBitbucketProjectPermissionsWorker(ctx context.Context, observationCtx *observation.Context, db database.DB, cfg *config, metrics bitbucketProjectPermissionsMetrics) *workerutil.Worker[*types.BitbucketProjectPermissionJob] { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("BitbucketProjectPermissionsWorker", ""), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("BitbucketProjectPermissionsWorker"), observationCtx) options := workerutil.WorkerOptions{ Name: "explicit_permissions_bitbucket_projects_jobs_worker", @@ -374,7 +374,7 @@ func newBitbucketProjectPermissionsWorker(ctx context.Context, observationCtx *o // newBitbucketProjectPermissionsResetter implements resetter for the explicit_permissions_bitbucket_projects_jobs table. // See resetter documentation for more details. https://docs.sourcegraph.com/dev/background-information/workers#dequeueing-and-resetting-jobs func newBitbucketProjectPermissionsResetter(observationCtx *observation.Context, db database.DB, cfg *config, metrics bitbucketProjectPermissionsMetrics) *dbworker.Resetter[*types.BitbucketProjectPermissionJob] { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("BitbucketProjectPermissionsResetter", ""), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("BitbucketProjectPermissionsResetter"), observationCtx) workerStore := createBitbucketProjectPermissionsStore(observationCtx, db, cfg) @@ -393,7 +393,7 @@ func newBitbucketProjectPermissionsResetter(observationCtx *observation.Context, // createBitbucketProjectPermissionsStore creates a store that reads and writes to the explicit_permissions_bitbucket_projects_jobs table. // It is used by the worker and resetter. 
func createBitbucketProjectPermissionsStore(observationCtx *observation.Context, s basestore.ShareableStore, cfg *config) dbworkerstore.Store[*types.BitbucketProjectPermissionJob] { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("BitbucketProjectPermission.Store", ""), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("BitbucketProjectPermission.Store"), observationCtx) return dbworkerstore.New(observationCtx, s.Handle(), dbworkerstore.Options[*types.BitbucketProjectPermissionJob]{ Name: "explicit_permissions_bitbucket_projects_jobs_store", @@ -417,7 +417,7 @@ type bitbucketProjectPermissionsMetrics struct { } func newMetricsForBitbucketProjectPermissionsQueries(logger log.Logger) bitbucketProjectPermissionsMetrics { - observationCtx := observation.NewContext(logger.Scoped("routines", "bitbucket projects explicit permissions job routines")) + observationCtx := observation.NewContext(logger.Scoped("routines")) resetFailures := prometheus.NewCounter(prometheus.CounterOpts{ Name: "src_explicit_permissions_bitbucket_project_query_reset_failures_total", diff --git a/cmd/worker/internal/ratelimit/job.go b/cmd/worker/internal/ratelimit/job.go index 0847d38cd47..ea819a67ed7 100644 --- a/cmd/worker/internal/ratelimit/job.go +++ b/cmd/worker/internal/ratelimit/job.go @@ -31,7 +31,7 @@ func (s *rateLimitConfigJob) Routines(_ context.Context, observationCtx *observa if err != nil { return nil, err } - logger := observationCtx.Logger.Scoped("Periodic rate limit config job", "Routine that periodically copies rate limit configurations to Redis.") + logger := observationCtx.Logger.Scoped("Periodic rate limit config job") rlcWorker := makeRateLimitConfigWorker(&handler{ logger: logger, externalServiceStore: db.ExternalServices(), diff --git a/cmd/worker/internal/search/exhaustive_search.go b/cmd/worker/internal/search/exhaustive_search.go index 2c545e10881..c3ad13aaf6e 100644 --- a/cmd/worker/internal/search/exhaustive_search.go +++ b/cmd/worker/internal/search/exhaustive_search.go @@ -27,7 +27,7 @@ func newExhaustiveSearchWorker( config config, ) goroutine.BackgroundRoutine { handler := &exhaustiveSearchHandler{ - logger: log.Scoped("exhaustive-search", "The background worker running exhaustive searches"), + logger: log.Scoped("exhaustive-search"), store: exhaustiveSearchStore, newSearcher: newSearcher, } diff --git a/cmd/worker/internal/search/exhaustive_search_repo.go b/cmd/worker/internal/search/exhaustive_search_repo.go index 9ac3d13c6e8..991f907299a 100644 --- a/cmd/worker/internal/search/exhaustive_search_repo.go +++ b/cmd/worker/internal/search/exhaustive_search_repo.go @@ -27,7 +27,7 @@ func newExhaustiveSearchRepoWorker( config config, ) goroutine.BackgroundRoutine { handler := &exhaustiveSearchRepoHandler{ - logger: log.Scoped("exhaustive-search-repo", "The background worker running exhaustive searches on a repository"), + logger: log.Scoped("exhaustive-search-repo"), store: exhaustiveSearchStore, newSearcher: newSearcher, } diff --git a/cmd/worker/internal/search/exhaustive_search_repo_revision.go b/cmd/worker/internal/search/exhaustive_search_repo_revision.go index 97f4e813f7b..cae181f3eda 100644 --- a/cmd/worker/internal/search/exhaustive_search_repo_revision.go +++ b/cmd/worker/internal/search/exhaustive_search_repo_revision.go @@ -32,7 +32,7 @@ func newExhaustiveSearchRepoRevisionWorker( config config, ) goroutine.BackgroundRoutine { handler := &exhaustiveSearchRepoRevHandler{ - logger: 
log.Scoped("exhaustive-search-repo-revision", "The background worker running exhaustive searches on a revision of a repository"), + logger: log.Scoped("exhaustive-search-repo-revision"), store: exhaustiveSearchStore, newSearcher: newSearcher, uploadStore: uploadStore, diff --git a/cmd/worker/internal/search/job.go b/cmd/worker/internal/search/job.go index 8967892aa2a..7588695c0b6 100644 --- a/cmd/worker/internal/search/job.go +++ b/cmd/worker/internal/search/job.go @@ -103,7 +103,7 @@ func (j *searchJob) newSearchJobRoutines( ) observationCtx = observation.ContextWithLogger( - observationCtx.Logger.Scoped("routines", "exhaustive search job routines"), + observationCtx.Logger.Scoped("routines"), observationCtx, ) diff --git a/cmd/worker/internal/telemetrygatewayexporter/exporter.go b/cmd/worker/internal/telemetrygatewayexporter/exporter.go index 2fdc36d9952..373cbf40797 100644 --- a/cmd/worker/internal/telemetrygatewayexporter/exporter.go +++ b/cmd/worker/internal/telemetrygatewayexporter/exporter.go @@ -87,7 +87,7 @@ func (j *exporterJob) Handle(ctx context.Context) error { // as each worker run will create a new one. exporter, err := telemetrygateway.NewExporter( ctx, - j.logger.Scoped("exporter", "exporter client"), + j.logger.Scoped("exporter"), conf.DefaultClient(), j.globalStateStore, ConfigInst.ExportAddress, diff --git a/cmd/worker/shared/main.go b/cmd/worker/shared/main.go index 6f308aae147..37bbe99df04 100644 --- a/cmd/worker/shared/main.go +++ b/cmd/worker/shared/main.go @@ -329,7 +329,7 @@ func runRoutinesConcurrently(observationCtx *observation.Context, jobs map[strin defer cancel() for _, name := range jobNames(jobs) { - jobLogger := observationCtx.Logger.Scoped(name, jobs[name].Description()) + jobLogger := observationCtx.Logger.Scoped(name) observationCtx := observation.ContextWithLogger(jobLogger, observationCtx) if !shouldRunJob(name) { @@ -382,7 +382,7 @@ func jobNames(jobs map[string]workerjob.Job) []string { // the jobs configured in this service. This also enables repository update operations to fetch // permissions from code hosts. 
func setAuthzProviders(ctx context.Context, observationCtx *observation.Context) { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("authz-provider", ""), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("authz-provider"), observationCtx) db, err := workerdb.InitDB(observationCtx) if err != nil { return diff --git a/deps.bzl b/deps.bzl index 1c5747f4bfb..e945379e0c5 100644 --- a/deps.bzl +++ b/deps.bzl @@ -5903,8 +5903,8 @@ def go_dependencies(): name = "com_github_sourcegraph_log", build_file_proto_mode = "disable_global", importpath = "github.com/sourcegraph/log", - sum = "h1:eALn0yz3ljezZVueXObvk0hHBr3Qsox+L78//9qps+M=", - version = "v0.0.0-20230918165208-4a174e4ec4cf", + sum = "h1:tHKdC+bXxxGJ0cy/R06kg6Z0zqwVGOWMx8uWsIwsaoY=", + version = "v0.0.0-20231018134238-fbadff7458bb", ) go_repository( name = "com_github_sourcegraph_managed_services_platform_cdktf_gen_cloudflare", @@ -5933,8 +5933,8 @@ def go_dependencies(): name = "com_github_sourcegraph_mountinfo", build_file_proto_mode = "disable_global", importpath = "github.com/sourcegraph/mountinfo", - sum = "h1:NSYSPQOE7yyyytLbKQHjxSkPnBagaGQblgVMQrQ1je0=", - version = "v0.0.0-20230106004439-7026e28cef67", + sum = "h1:2lUb58rz1bq77wL3hb6OBT58uBVtlNs1o23Kahfj/kU=", + version = "v0.0.0-20231018142932-e00da332dac5", ) go_repository( name = "com_github_sourcegraph_run", @@ -5964,8 +5964,8 @@ def go_dependencies(): name = "com_github_sourcegraph_zoekt", build_file_proto_mode = "disable_global", importpath = "github.com/sourcegraph/zoekt", - sum = "h1:1Xa7GWtMdnatmIqzOsAhLigU+SttgXPvygKn0eMJZzc=", - version = "v0.0.0-20231017111049-f17ff0bac96a", + sum = "h1:2IPj5DTbBb8cAPF5ZJGIVxO81MTLpuF68YQPyr1KbzQ=", + version = "v0.0.0-20231018143538-16e2ff8c98ee", ) go_repository( diff --git a/dev/build-tracker/build/build.go b/dev/build-tracker/build/build.go index 2248d4f1cf0..dea1e5c5509 100644 --- a/dev/build-tracker/build/build.go +++ b/dev/build-tracker/build/build.go @@ -226,7 +226,7 @@ type Store struct { func NewBuildStore(logger log.Logger) *Store { return &Store{ - logger: logger.Scoped("store", "stores all the buildkite builds"), + logger: logger.Scoped("store"), builds: make(map[int]*Build), consecutiveFailures: make(map[string]int), diff --git a/dev/build-tracker/main.go b/dev/build-tracker/main.go index 536b418207e..933132dfb05 100644 --- a/dev/build-tracker/main.go +++ b/dev/build-tracker/main.go @@ -42,7 +42,7 @@ type Server struct { // NewServer creatse a new server to listen for Buildkite webhook events. 
func NewServer(logger log.Logger, c config.Config) *Server { - logger = logger.Scoped("server", "Server which tracks events received from Buildkite and sends notifications on failures") + logger = logger.Scoped("server") server := &Server{ logger: logger, store: build.NewBuildStore(logger), @@ -278,7 +278,7 @@ func main() { }) defer sync.Sync() - logger := log.Scoped("BuildTracker", "main entrypoint for Build Tracking Server") + logger := log.Scoped("BuildTracker") serverConf, err := config.NewFromEnv() if err != nil { diff --git a/dev/build-tracker/notify/slack.go b/dev/build-tracker/notify/slack.go index 3bed1b64089..fc826cd2c09 100644 --- a/dev/build-tracker/notify/slack.go +++ b/dev/build-tracker/notify/slack.go @@ -112,7 +112,7 @@ func NewClient(logger log.Logger, slackToken, githubToken, channel string) *Clie history := make(map[int]*SlackNotification) return &Client{ - logger: logger.Scoped("notificationClient", "client which interacts with Slack and Github to send notifications"), + logger: logger.Scoped("notificationClient"), slack: *slackClient, team: teamResolver, channel: channel, diff --git a/dev/ci/gen-pipeline.go b/dev/ci/gen-pipeline.go index f9c45ac8aac..c86d0f0b305 100644 --- a/dev/ci/gen-pipeline.go +++ b/dev/ci/gen-pipeline.go @@ -50,7 +50,7 @@ func main() { )) defer liblog.Sync() - logger := log.Scoped("gen-pipeline", "generates the pipeline for ci") + logger := log.Scoped("gen-pipeline") if docs { renderPipelineDocs(logger, os.Stdout) diff --git a/dev/ci/integration/executors/tester/main.go b/dev/ci/integration/executors/tester/main.go index cabb1c8fc18..b118252dba6 100644 --- a/dev/ci/integration/executors/tester/main.go +++ b/dev/ci/integration/executors/tester/main.go @@ -30,7 +30,7 @@ func main() { }) defer logfuncs.Sync() - logger := log.Scoped("init", "runner initialization process") + logger := log.Scoped("init") db, err := initDB(logger) if err != nil { diff --git a/dev/ci/internal/ci/wolfi_operations.go b/dev/ci/internal/ci/wolfi_operations.go index b32470e7997..7c9ac2fee85 100644 --- a/dev/ci/internal/ci/wolfi_operations.go +++ b/dev/ci/internal/ci/wolfi_operations.go @@ -47,7 +47,7 @@ func WolfiPackagesOperations(changedFiles []string) (*operations.Set, []string) // WolfiBaseImagesOperations rebuilds any base images whose configurations have changed func WolfiBaseImagesOperations(changedFiles []string, tag string, packagesChanged bool) (*operations.Set, int) { ops := operations.NewNamedSet("Base image builds") - logger := log.Scoped("gen-pipeline", "generates the pipeline for ci") + logger := log.Scoped("gen-pipeline") var buildStepKeys []string for _, c := range changedFiles { diff --git a/dev/deployment-notifier/main.go b/dev/deployment-notifier/main.go index 56f837294d3..b852667b1e4 100644 --- a/dev/deployment-notifier/main.go +++ b/dev/deployment-notifier/main.go @@ -48,7 +48,7 @@ func main() { ctx := context.Background() liblog := log.Init(log.Resource{Name: "deployment-notifier"}) defer liblog.Sync() - logger = log.Scoped("main", "a script that checks for deployment notifications") + logger = log.Scoped("main") flags := &Flags{} flags.Parse() diff --git a/dev/internal/cmd/app-discover-repos/app-discover-repos.go b/dev/internal/cmd/app-discover-repos/app-discover-repos.go index d369923d817..689d07995e7 100644 --- a/dev/internal/cmd/app-discover-repos/app-discover-repos.go +++ b/dev/internal/cmd/app-discover-repos/app-discover-repos.go @@ -42,7 +42,7 @@ func main() { srv := &servegit.Serve{ ServeConfig: c.ServeConfig, - Logger: log.Scoped("serve", ""), 
+ Logger: log.Scoped("serve"), } if *lsRemote { diff --git a/dev/internal/cmd/search-plan/search-plan.go b/dev/internal/cmd/search-plan/search-plan.go index 046e6b1bb3b..c79f22a029e 100644 --- a/dev/internal/cmd/search-plan/search-plan.go +++ b/dev/internal/cmd/search-plan/search-plan.go @@ -44,7 +44,7 @@ func run(w io.Writer, args []string) error { // Sourcegraph infra we need conf.Mock(&conf.Unified{}) envvar.MockSourcegraphDotComMode(*dotCom) - logger := log.Scoped("search-plan", "") + logger := log.Scoped("search-plan") cli := client.Mocked(job.RuntimeClients{Logger: logger}) diff --git a/dev/scaletesting/bulkreposettings/main.go b/dev/scaletesting/bulkreposettings/main.go index 4027853307c..0d8be658aef 100644 --- a/dev/scaletesting/bulkreposettings/main.go +++ b/dev/scaletesting/bulkreposettings/main.go @@ -60,7 +60,7 @@ var app = &cli.App{ Name: "private", Description: "Set repo visibility to private", Action: func(cmd *cli.Context) error { - logger := log.Scoped("runner", "") + logger := log.Scoped("runner") ctx := context.Background() tc := oauth2.NewClient(ctx, oauth2.StaticTokenSource( &oauth2.Token{AccessToken: cmd.String("github.token")}, @@ -348,7 +348,7 @@ func main() { Name: "codehostcopy", }) defer cb.Sync() - logger := log.Scoped("main", "") + logger := log.Scoped("main") if err := app.RunContext(context.Background(), os.Args); err != nil { logger.Fatal("failed to run", log.Error(err)) diff --git a/dev/scaletesting/codehostcopy/bitbucket.go b/dev/scaletesting/codehostcopy/bitbucket.go index a32dd3c773b..c6df9685344 100644 --- a/dev/scaletesting/codehostcopy/bitbucket.go +++ b/dev/scaletesting/codehostcopy/bitbucket.go @@ -41,7 +41,7 @@ func NewBitbucketCodeHost(logger log.Logger, def *CodeHostDefinition) (*Bitbucke c := bitbucket.NewBasicAuthClient(def.Username, def.Password, u, bitbucket.WithTimeout(15*time.Second)) return &BitbucketCodeHost{ - logger: logger.Scoped("bitbucket", "client that interacts with bitbucket server rest api"), + logger: logger.Scoped("bitbucket"), def: def, c: c, perPage: 30, diff --git a/dev/scaletesting/codehostcopy/dummy.go b/dev/scaletesting/codehostcopy/dummy.go index 70cb7895d98..ba3fa3a4aee 100644 --- a/dev/scaletesting/codehostcopy/dummy.go +++ b/dev/scaletesting/codehostcopy/dummy.go @@ -17,7 +17,7 @@ var _ CodeHostDestination = (*DummyCodeHostDestination)(nil) func NewDummyCodeHost(logger log.Logger, def *CodeHostDefinition) *DummyCodeHostDestination { return &DummyCodeHostDestination{ - logger: logger.Scoped("dummy", "DummyCodeHost, pretending to perform actions"), + logger: logger.Scoped("dummy"), def: def, } } diff --git a/dev/scaletesting/codehostcopy/main.go b/dev/scaletesting/codehostcopy/main.go index 4346714251a..b0a1e63efd0 100644 --- a/dev/scaletesting/codehostcopy/main.go +++ b/dev/scaletesting/codehostcopy/main.go @@ -68,7 +68,7 @@ var app = &cli.App{ }, }, Action: func(cmd *cli.Context) error { - return doRun(cmd.Context, log.Scoped("runner", ""), cmd.String("state"), cmd.String("config")) + return doRun(cmd.Context, log.Scoped("runner"), cmd.String("state"), cmd.String("config")) }, Commands: []*cli.Command{ { @@ -83,7 +83,7 @@ var app = &cli.App{ Name: "list", Description: "list repos from the 'from' codehost defined in the configuration", Action: func(cmd *cli.Context) error { - return doList(cmd.Context, log.Scoped("list", ""), cmd.String("state"), cmd.String("config"), cmd.Int("limit")) + return doList(cmd.Context, log.Scoped("list"), cmd.String("state"), cmd.String("config"), cmd.Int("limit")) }, Flags: []cli.Flag{ 
&cli.IntFlag{ @@ -179,7 +179,7 @@ func main() { Name: "codehostcopy", }) defer cb.Sync() - logger := log.Scoped("main", "") + logger := log.Scoped("main") if err := app.RunContext(context.Background(), os.Args); err != nil { logger.Fatal("failed to run", log.Error(err)) diff --git a/dev/sg/internal/migration/squash.go b/dev/sg/internal/migration/squash.go index 57f195c5907..3d5ceeab3cf 100644 --- a/dev/sg/internal/migration/squash.go +++ b/dev/sg/internal/migration/squash.go @@ -264,7 +264,7 @@ func setupDatabaseForSquash(database db.Database, runInContainer, runInTimescale // runTargetedUpMigrations runs up migration targeting the given versions on the given database instance. func runTargetedUpMigrations(database db.Database, targetVersions []int, postgresDSN string) (err error) { - logger := log.Scoped("runTargetedUpMigrations", "") + logger := log.Scoped("runTargetedUpMigrations") pending := std.Out.Pending(output.Line("", output.StylePending, "Migrating PostgreSQL schema...")) defer func() { diff --git a/dev/sg/internal/rfc/rfc.go b/dev/sg/internal/rfc/rfc.go index 79a17cd155a..fa8aa281df5 100644 --- a/dev/sg/internal/rfc/rfc.go +++ b/dev/sg/internal/rfc/rfc.go @@ -143,7 +143,7 @@ func authResponseHandler(sendCode chan string, sendError chan error, gracefulShu // gracefulShutdown: Whether the server shutdown gracefully after handling a request. // handler: The request handler for the server, containing the authEndpoint. func startAuthHandlerServer(socket net.Listener, authEndpoint string, codeReceiver chan string, errorReceiver chan error) { - logger := log.Scoped("rfc_auth_handler", "sg rfc oauth handler") + logger := log.Scoped("rfc_auth_handler") var server http.Server gracefulShutdown := false diff --git a/dev/sg/sg_audit.go b/dev/sg/sg_audit.go index fd1fea71300..87743122eee 100644 --- a/dev/sg/sg_audit.go +++ b/dev/sg/sg_audit.go @@ -56,7 +56,7 @@ var auditCommand = &cli.Command{ &oauth2.Token{AccessToken: auditPRGitHubToken}, ))) - logger := log.Scoped("auditPR", "sg audit pr") + logger := log.Scoped("auditPR") logger.Debug("fetching issues") issues, err := fetchIssues(ctx.Context, logger, ghc) if err != nil { diff --git a/dev/sg/sg_db.go b/dev/sg/sg_db.go index 088f8a5a9ce..3b0298f5dca 100644 --- a/dev/sg/sg_db.go +++ b/dev/sg/sg_db.go @@ -186,7 +186,7 @@ sg db add-access-token -username=foo func dbAddUserAction(cmd *cli.Context) error { ctx := cmd.Context - logger := log.Scoped("dbAddUserAction", "") + logger := log.Scoped("dbAddUserAction") // Read the configuration. conf, _ := getConfig() @@ -241,7 +241,7 @@ func dbAddUserAction(cmd *cli.Context) error { func dbAddAccessTokenAction(cmd *cli.Context) error { ctx := cmd.Context - logger := log.Scoped("dbAddAccessTokenAction", "") + logger := log.Scoped("dbAddAccessTokenAction") // Read the configuration. 
conf, _ := getConfig() @@ -285,7 +285,7 @@ func dbAddAccessTokenAction(cmd *cli.Context) error { } func dbUpdateUserExternalAccount(cmd *cli.Context) error { - logger := log.Scoped("dbUpdateUserExternalAccount", "") + logger := log.Scoped("dbUpdateUserExternalAccount") ctx := cmd.Context username := cmd.String("sg.username") serviceName := cmd.String("extsvc.display-name") @@ -450,7 +450,7 @@ func deleteTestDBsExec(ctx *cli.Context) error { } dsn := config.String() - db, err := dbconn.ConnectInternal(log.Scoped("sg", ""), dsn, "", "") + db, err := dbconn.ConnectInternal(log.Scoped("sg"), dsn, "", "") if err != nil { return err } @@ -537,7 +537,7 @@ func dbResetPGExec(ctx *cli.Context) error { storeFactory := func(db *sql.DB, migrationsTable string) connections.Store { return connections.NewStoreShim(store.NewWithDB(&observation.TestContext, db, migrationsTable)) } - r, err := connections.RunnerFromDSNs(std.Out.Output, log.Scoped("migrations.runner", ""), dsnMap, "sg", storeFactory) + r, err := connections.RunnerFromDSNs(std.Out.Output, log.Scoped("migrations.runner"), dsnMap, "sg", storeFactory) if err != nil { return err } diff --git a/dev/sg/sg_insights.go b/dev/sg/sg_insights.go index 7560a2e4f94..f395ca904ff 100644 --- a/dev/sg/sg_insights.go +++ b/dev/sg/sg_insights.go @@ -63,7 +63,7 @@ func getInsightSeriesIDsAction(cmd *cli.Context) error { std.Out.WriteNoticef("Finding the Series IDs for %s", ids[0]) ctx := cmd.Context - logger := log.Scoped("getInsightSeriesIDsAction", "") + logger := log.Scoped("getInsightSeriesIDsAction") // Read the configuration. conf, err := getConfig() diff --git a/dev/sg/sg_migration.go b/dev/sg/sg_migration.go index 253848ddc24..b0475b74dc7 100644 --- a/dev/sg/sg_migration.go +++ b/dev/sg/sg_migration.go @@ -225,7 +225,7 @@ func makeRunnerWithSchemas(schemaNames []string, schemas []*schemas.Schema) (*ru // configuration and use process env as fallback. 
var getEnv func(string) string config, _ := getConfig() - logger := log.Scoped("migrations.runner", "migration runner") + logger := log.Scoped("migrations.runner") if config != nil { getEnv = config.GetEnv } else { diff --git a/dev/sg/sg_page.go b/dev/sg/sg_page.go index b9e841dc5e0..e108ce04600 100644 --- a/dev/sg/sg_page.go +++ b/dev/sg/sg_page.go @@ -56,7 +56,7 @@ var pageCommand = &cli.Command{ } func pageExec(cmd *cli.Context) error { - logger := log.Scoped("pager", "paging client for SG") + logger := log.Scoped("pager") priority, err := parseOpsGeniePriority(cmd.String("priority")) if err != nil { diff --git a/docker-images/prometheus/cmd/prom-wrapper/main.go b/docker-images/prometheus/cmd/prom-wrapper/main.go index c044cde9abf..9da882c780d 100644 --- a/docker-images/prometheus/cmd/prom-wrapper/main.go +++ b/docker-images/prometheus/cmd/prom-wrapper/main.go @@ -49,7 +49,7 @@ func main() { }) defer liblog.Sync() - logger := log.Scoped("prom-wrapper", "sourcegraph/prometheus wrapper program") + logger := log.Scoped("prom-wrapper") ctx := context.Background() disableAlertmanager := noAlertmanager == "true" @@ -98,7 +98,7 @@ func main() { logger.Info("DISABLE_SOURCEGRAPH_CONFIG=true; configuration syncing is disabled") } else { logger.Info("initializing configuration") - subscriber := NewSiteConfigSubscriber(logger.Scoped("siteconfig", "site configuration subscriber"), alertmanager) + subscriber := NewSiteConfigSubscriber(logger.Scoped("siteconfig"), alertmanager) // watch for configuration updates in the background go subscriber.Subscribe(ctx) diff --git a/docker-images/prometheus/cmd/prom-wrapper/status.go b/docker-images/prometheus/cmd/prom-wrapper/status.go index 11378b72f36..cd333e86742 100644 --- a/docker-images/prometheus/cmd/prom-wrapper/status.go +++ b/docker-images/prometheus/cmd/prom-wrapper/status.go @@ -20,7 +20,7 @@ type AlertsStatusReporter struct { func NewAlertsStatusReporter(logger log.Logger, alertmanager *amclient.Alertmanager) *AlertsStatusReporter { return &AlertsStatusReporter{ - log: logger.Scoped("alerts-status", "alerts status reporter"), + log: logger.Scoped("alerts-status"), alertmanager: alertmanager, } } diff --git a/go.mod b/go.mod index 2dd06dad355..1aa404e2dd9 100644 --- a/go.mod +++ b/go.mod @@ -180,7 +180,7 @@ require ( github.com/sourcegraph/go-lsp v0.0.0-20200429204803-219e11d77f5d github.com/sourcegraph/go-rendezvous v0.0.0-20210910070954-ef39ade5591d github.com/sourcegraph/jsonx v0.0.0-20200629203448-1a936bd500cf - github.com/sourcegraph/log v0.0.0-20230918165208-4a174e4ec4cf + github.com/sourcegraph/log v0.0.0-20231018134238-fbadff7458bb github.com/sourcegraph/run v0.12.0 github.com/sourcegraph/scip v0.3.1-0.20230627154934-45df7f6d33fc github.com/sourcegraph/sourcegraph/dev/ci/images v0.0.0-20220203145655-4d2a39d3038a @@ -545,9 +545,9 @@ require ( github.com/scim2/filter-parser/v2 v2.2.0 github.com/snabb/diagio v1.0.0 // indirect github.com/sourcegraph/conc v0.2.0 - github.com/sourcegraph/mountinfo v0.0.0-20230106004439-7026e28cef67 + github.com/sourcegraph/mountinfo v0.0.0-20231018142932-e00da332dac5 github.com/sourcegraph/sourcegraph/monitoring v0.0.0-20230124144931-b2d81b1accb6 - github.com/sourcegraph/zoekt v0.0.0-20231017111049-f17ff0bac96a + github.com/sourcegraph/zoekt v0.0.0-20231018143538-16e2ff8c98ee github.com/spf13/cobra v1.7.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.0 // indirect diff --git a/go.sum b/go.sum index 1bd3d5f59bf..537b2b1cbfa 100644 --- a/go.sum +++ b/go.sum @@ -1574,16 +1574,16 
@@ github.com/sourcegraph/httpgzip v0.0.0-20211015085752-0bad89b3b4df h1:VaS8k40GiN github.com/sourcegraph/httpgzip v0.0.0-20211015085752-0bad89b3b4df/go.mod h1:RqWagzxNGCvucQQC9vX6aps474LCCOgshDpUTTyb+O8= github.com/sourcegraph/jsonx v0.0.0-20200629203448-1a936bd500cf h1:oAdWFqhStsWiiMP/vkkHiMXqFXzl1XfUNOdxKJbd6bI= github.com/sourcegraph/jsonx v0.0.0-20200629203448-1a936bd500cf/go.mod h1:ppFaPm6kpcHnZGqQTFhUIAQRIEhdQDWP1PCv4/ON354= -github.com/sourcegraph/log v0.0.0-20230918165208-4a174e4ec4cf h1:eALn0yz3ljezZVueXObvk0hHBr3Qsox+L78//9qps+M= -github.com/sourcegraph/log v0.0.0-20230918165208-4a174e4ec4cf/go.mod h1:IDp09QkoqS8Z3CyN2RW6vXjgABkNpDbyjLIHNQwQ8P8= +github.com/sourcegraph/log v0.0.0-20231018134238-fbadff7458bb h1:tHKdC+bXxxGJ0cy/R06kg6Z0zqwVGOWMx8uWsIwsaoY= +github.com/sourcegraph/log v0.0.0-20231018134238-fbadff7458bb/go.mod h1:IDp09QkoqS8Z3CyN2RW6vXjgABkNpDbyjLIHNQwQ8P8= github.com/sourcegraph/managed-services-platform-cdktf/gen/cloudflare v0.0.0-20230822024612-edb48c530722 h1:0bXluGjV4O3XBeFA3Kck9kHS3ilvgJo8mW9ADx8oeHE= github.com/sourcegraph/managed-services-platform-cdktf/gen/cloudflare v0.0.0-20230822024612-edb48c530722/go.mod h1:Djd6jHBZqe4/+MEpiBxKvzPS24NqYxNBbAYv/0074JI= github.com/sourcegraph/managed-services-platform-cdktf/gen/google v0.0.0-20230822024612-edb48c530722 h1:oc+BUJi+WYypX8i7vnxz4D4z/99b/H0u5Oc+b1rA5fI= github.com/sourcegraph/managed-services-platform-cdktf/gen/google v0.0.0-20230822024612-edb48c530722/go.mod h1:P1liUcPEczidOp1kyPvi2eqil4243IuPJ+tCOASCHEk= github.com/sourcegraph/managed-services-platform-cdktf/gen/random v0.0.0-20230822024612-edb48c530722 h1:N0OxHqeujHxvVU666KQY9whauLyw4s3BJGBLxx6gKR0= github.com/sourcegraph/managed-services-platform-cdktf/gen/random v0.0.0-20230822024612-edb48c530722/go.mod h1:TiUqRvYs/Gah8bGw/toyVWCaP3dnCB4tBh3jf5HGdo0= -github.com/sourcegraph/mountinfo v0.0.0-20230106004439-7026e28cef67 h1:NSYSPQOE7yyyytLbKQHjxSkPnBagaGQblgVMQrQ1je0= -github.com/sourcegraph/mountinfo v0.0.0-20230106004439-7026e28cef67/go.mod h1:4DAabK408OEbyK2NUEQ5YRApyB/p0XNGJyC1YPBAKq4= +github.com/sourcegraph/mountinfo v0.0.0-20231018142932-e00da332dac5 h1:2lUb58rz1bq77wL3hb6OBT58uBVtlNs1o23Kahfj/kU= +github.com/sourcegraph/mountinfo v0.0.0-20231018142932-e00da332dac5/go.mod h1:ghoEiutaNVERt2cu5q/bU3HOo29AHGSPrRZE1sOaA0w= github.com/sourcegraph/run v0.12.0 h1:3A8w5e8HIYPfafHekvmdmmh42RHKGVhmiTZAPJclg7I= github.com/sourcegraph/run v0.12.0/go.mod h1:PwaP936BTnAJC1cqR5rSbG5kOs/EWStTK3lqvMX5GUA= github.com/sourcegraph/scip v0.3.1-0.20230627154934-45df7f6d33fc h1:o+eq0cjVV3B5ngIBF04Lv3GwttKOuYFF5NTcfXWXzfA= @@ -1592,8 +1592,8 @@ github.com/sourcegraph/tiktoken-go v0.0.0-20230905173153-caab340cf008 h1:Wu8W50q github.com/sourcegraph/tiktoken-go v0.0.0-20230905173153-caab340cf008/go.mod h1:9NiV+i9mJKGj1rYOT+njbv+ZwA/zJxYdewGl6qVatpg= github.com/sourcegraph/yaml v1.0.1-0.20200714132230-56936252f152 h1:z/MpntplPaW6QW95pzcAR/72Z5TWDyDnSo0EOcyij9o= github.com/sourcegraph/yaml v1.0.1-0.20200714132230-56936252f152/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I= -github.com/sourcegraph/zoekt v0.0.0-20231017111049-f17ff0bac96a h1:1Xa7GWtMdnatmIqzOsAhLigU+SttgXPvygKn0eMJZzc= -github.com/sourcegraph/zoekt v0.0.0-20231017111049-f17ff0bac96a/go.mod h1:gHfSe997J5w8zX5MGHFei/darZmml75Xvpoykwtknlo= +github.com/sourcegraph/zoekt v0.0.0-20231018143538-16e2ff8c98ee h1:2IPj5DTbBb8cAPF5ZJGIVxO81MTLpuF68YQPyr1KbzQ= +github.com/sourcegraph/zoekt v0.0.0-20231018143538-16e2ff8c98ee/go.mod h1:7KKGxC1hEyRVTZB9QEpktVC20ANFmKVwX6IdcWx7yk8= github.com/spaolacci/murmur3 
v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v0.0.0-20170901052352-ee1bd8ee15a1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= diff --git a/internal/adminanalytics/cache.go b/internal/adminanalytics/cache.go index a95ac6fa615..1d4ae73265e 100644 --- a/internal/adminanalytics/cache.go +++ b/internal/adminanalytics/cache.go @@ -123,7 +123,7 @@ func refreshAnalyticsCache(ctx context.Context, db database.DB) error { var started bool func StartAnalyticsCacheRefresh(ctx context.Context, db database.DB) { - logger := log.Scoped("adminanalytics:cache-refresh", "admin analytics cache refresh") + logger := log.Scoped("adminanalytics:cache-refresh") if started { panic("already started") diff --git a/internal/api/internalapi/client.go b/internal/api/internalapi/client.go index fc5fa847f10..f465876d59b 100644 --- a/internal/api/internalapi/client.go +++ b/internal/api/internalapi/client.go @@ -54,7 +54,7 @@ type internalClient struct { var Client = &internalClient{ URL: frontendInternal.String(), getConfClient: syncx.OnceValues(func() (proto.ConfigServiceClient, error) { - logger := log.Scoped("internalapi", "") + logger := log.Scoped("internalapi") conn, err := defaults.Dial(frontendInternal.Host, logger) if err != nil { return nil, err diff --git a/internal/audit/integration/cmd/main.go b/internal/audit/integration/cmd/main.go index 7d8ccab5ef3..9b04d09fbab 100644 --- a/internal/audit/integration/cmd/main.go +++ b/internal/audit/integration/cmd/main.go @@ -31,7 +31,7 @@ func main() { defer callbacks.Sync() - logger := log.Scoped("test", "logger with sampling config") + logger := log.Scoped("test") logsCount, err := strconv.Atoi(os.Args[1]) if err != nil { diff --git a/internal/auth/accessrequest/handlers.go b/internal/auth/accessrequest/handlers.go index 43724272d78..b85c640e24a 100644 --- a/internal/auth/accessrequest/handlers.go +++ b/internal/auth/accessrequest/handlers.go @@ -20,7 +20,7 @@ import ( // HandleRequestAccess handles submission of the request access form. func HandleRequestAccess(logger log.Logger, db database.DB) http.HandlerFunc { - logger = logger.Scoped("HandleRequestAccess", "request access request handler") + logger = logger.Scoped("HandleRequestAccess") return func(w http.ResponseWriter, r *http.Request) { if !conf.IsAccessRequestEnabled() { logger.Error("experimental feature accessRequests is disabled, but received request") diff --git a/internal/auth/userpasswd/handlers.go b/internal/auth/userpasswd/handlers.go index c340fb37057..4a26626b249 100644 --- a/internal/auth/userpasswd/handlers.go +++ b/internal/auth/userpasswd/handlers.go @@ -57,7 +57,7 @@ type unlockUserAccountInfo struct { // HandleSignUp handles submission of the user signup form. func HandleSignUp(logger log.Logger, db database.DB, eventRecorder *telemetry.EventRecorder) http.HandlerFunc { - logger = logger.Scoped("HandleSignUp", "sign up request handler") + logger = logger.Scoped("HandleSignUp") return func(w http.ResponseWriter, r *http.Request) { if handleEnabledCheck(logger, w) { return @@ -72,7 +72,7 @@ func HandleSignUp(logger log.Logger, db database.DB, eventRecorder *telemetry.Ev // HandleSiteInit handles submission of the site initialization form, where the initial site admin user is created. 
func HandleSiteInit(logger log.Logger, db database.DB, events *telemetry.EventRecorder) http.HandlerFunc { - logger = logger.Scoped("HandleSiteInit", "initial size initialization request handler") + logger = logger.Scoped("HandleSiteInit") return func(w http.ResponseWriter, r *http.Request) { // This only succeeds if the site is not yet initialized and there are no users yet. It doesn't // allow signups after those conditions become true, so we don't need to check the builtin auth @@ -293,7 +293,7 @@ func getByEmailOrUsername(ctx context.Context, db database.DB, emailOrUsername s // The account will be locked out after consecutive failed attempts in a certain // period of time. func HandleSignIn(logger log.Logger, db database.DB, store LockoutStore, recorder *telemetry.EventRecorder) http.HandlerFunc { - logger = logger.Scoped("HandleSignin", "sign in request handler") + logger = logger.Scoped("HandleSignin") events := telemetry.NewBestEffortEventRecorder(logger, recorder) return func(w http.ResponseWriter, r *http.Request) { @@ -394,7 +394,7 @@ func HandleSignIn(logger log.Logger, db database.DB, store LockoutStore, recorde } func HandleUnlockAccount(logger log.Logger, _ database.DB, store LockoutStore) http.HandlerFunc { - logger = logger.Scoped("HandleUnlockAccount", "unlock account request handler") + logger = logger.Scoped("HandleUnlockAccount") return func(w http.ResponseWriter, r *http.Request) { if handleEnabledCheck(logger, w) { return @@ -506,7 +506,7 @@ func checkAccountLockout(store LockoutStore, user *types.User, event *database.S // HandleCheckUsernameTaken checks availability of username for signup form func HandleCheckUsernameTaken(logger log.Logger, db database.DB) http.HandlerFunc { - logger = logger.Scoped("HandleCheckUsernameTaken", "checks for username uniqueness") + logger = logger.Scoped("HandleCheckUsernameTaken") return func(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) username, err := NormalizeUsername(vars["username"]) diff --git a/internal/auth/userpasswd/reset_password.go b/internal/auth/userpasswd/reset_password.go index 2e9385f5c2f..e6cede6230d 100644 --- a/internal/auth/userpasswd/reset_password.go +++ b/internal/auth/userpasswd/reset_password.go @@ -39,7 +39,7 @@ func SendResetPasswordURLEmail(ctx context.Context, email, username string, rese // HandleResetPasswordInit initiates the builtin-auth password reset flow by sending a password-reset email. func HandleResetPasswordInit(logger log.Logger, db database.DB) http.HandlerFunc { - logger = logger.Scoped("HandleResetPasswordInit", "password reset initialization flow handler") + logger = logger.Scoped("HandleResetPasswordInit") return func(w http.ResponseWriter, r *http.Request) { if handleEnabledCheck(logger, w) { return @@ -96,7 +96,7 @@ func HandleResetPasswordInit(logger log.Logger, db database.DB) http.HandlerFunc // HandleResetPasswordCode resets the password if the correct code is provided, and also // verifies emails if the appropriate parameters are found. 
func HandleResetPasswordCode(logger log.Logger, db database.DB) http.HandlerFunc { - logger = logger.Scoped("HandleResetPasswordCode", "verifies password reset code requests handler") + logger = logger.Scoped("HandleResetPasswordCode") return func(w http.ResponseWriter, r *http.Request) { if handleEnabledCheck(logger, w) { diff --git a/internal/authz/providers/authz.go b/internal/authz/providers/authz.go index 84db5a7ab82..4873fa5ce53 100644 --- a/internal/authz/providers/authz.go +++ b/internal/authz/providers/authz.go @@ -46,7 +46,7 @@ func ProvidersFromConfig( warnings []string, invalidConnections []string, ) { - logger := log.Scoped("authz", " parse provider from config") + logger := log.Scoped("authz") allowAccessByDefault = true defer func() { diff --git a/internal/authz/providers/azuredevops/provider.go b/internal/authz/providers/azuredevops/provider.go index 1eeee9dfe74..baaa87447c0 100644 --- a/internal/authz/providers/azuredevops/provider.go +++ b/internal/authz/providers/azuredevops/provider.go @@ -107,7 +107,7 @@ func (p *Provider) FetchAccount(_ context.Context, _ *types.User, _ []*extsvc.Ac } func (p *Provider) FetchUserPerms(ctx context.Context, account *extsvc.Account, _ authz.FetchPermsOptions) (*authz.ExternalUserPermissions, error) { - logger := log.Scoped("azuredevops.FetchuserPerms", "logger for azuredevops provider") + logger := log.Scoped("azuredevops.FetchuserPerms") logger.Debug("starting FetchUserPerms", log.String("user ID", fmt.Sprintf("%#v", account.UserID))) profile, token, err := azuredevops.GetExternalAccountData(ctx, &account.AccountData) diff --git a/internal/authz/providers/github/github.go b/internal/authz/providers/github/github.go index 0c26abe6387..75e5ef1453e 100644 --- a/internal/authz/providers/github/github.go +++ b/internal/authz/providers/github/github.go @@ -60,7 +60,7 @@ type ProviderOptions struct { func NewProvider(urn string, opts ProviderOptions) *Provider { if opts.GitHubClient == nil { apiURL, _ := github.APIRoot(opts.GitHubURL) - opts.GitHubClient = github.NewV3Client(log.Scoped("provider.github.v3", "provider github client"), + opts.GitHubClient = github.NewV3Client(log.Scoped("provider.github.v3"), urn, apiURL, opts.BaseAuther, nil) } @@ -354,7 +354,7 @@ func (p *Provider) fetchCachedAuthenticatedUserPerms(ctx context.Context, logger // This may return a partial result if an error is encountered, e.g. via rate limits. func (p *Provider) fetchAuthenticatedUserPerms(ctx context.Context, cli client, accountID extsvc.AccountID, opts authz.FetchPermsOptions) (*authz.ExternalUserPermissions, error) { // 🚨 SECURITY: Use user token is required to only list repositories the user has access to. - logger := log.Scoped("fetchUserPermsByToken", "fetches all the private repo ids that the token can access.") + logger := log.Scoped("fetchUserPermsByToken") // Repository affiliations to list for - groupsCache only lists for a subset. Left // unset indicates all affiliations should be sync'd. 
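The handler hunks above all make the same call-site change: Scoped loses its second, free-text description argument, and only the scope name remains. A minimal sketch of the resulting pattern, assuming a hypothetical handler (the name, package, and body here are illustrative and not part of this patch):

    package example // hypothetical package for this sketch

    import (
        "net/http"

        "github.com/sourcegraph/log"
    )

    // HandleExample shows the new single-argument form of Scoped.
    func HandleExample(logger log.Logger) http.HandlerFunc {
        // before: logger = logger.Scoped("HandleExample", "example request handler")
        logger = logger.Scoped("HandleExample") // description parameter removed
        return func(w http.ResponseWriter, r *http.Request) {
            logger.Debug("handling request")
        }
    }

Only the description is dropped; the scope name argument is unchanged.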
diff --git a/internal/authz/providers/perforce/authz.go b/internal/authz/providers/perforce/authz.go index 2c2cb696eb6..cadb6c58453 100644 --- a/internal/authz/providers/perforce/authz.go +++ b/internal/authz/providers/perforce/authz.go @@ -50,7 +50,7 @@ func newAuthzProvider( return nil, nil } - logger := log.Scoped("authz", "parse providers from config") + logger := log.Scoped("authz") if err := licensing.Check(licensing.FeatureACLs); err != nil { return nil, err } diff --git a/internal/authz/providers/perforce/cmd/scanprotects/main.go b/internal/authz/providers/perforce/cmd/scanprotects/main.go index 2f6cd0f34d6..ddfa001b473 100644 --- a/internal/authz/providers/perforce/cmd/scanprotects/main.go +++ b/internal/authz/providers/perforce/cmd/scanprotects/main.go @@ -39,7 +39,7 @@ func main() { ignoreRulesWithHost := ignoreRulesWithHostFlag == nil || *ignoreRulesWithHostFlag - logger := log.Scoped("scanprotects", "") + logger := log.Scoped("scanprotects") run(logger, *depot, os.Stdin, ignoreRulesWithHost) } diff --git a/internal/authz/providers/perforce/protects.go b/internal/authz/providers/perforce/protects.go index a320da03bf2..fef0e1befc8 100644 --- a/internal/authz/providers/perforce/protects.go +++ b/internal/authz/providers/perforce/protects.go @@ -194,7 +194,7 @@ type protectsScanner struct { // It handles skipping comments, cleaning whitespace, parsing relevant fields, and // skipping entries that do not affect read access. func scanProtects(logger log.Logger, protects []*perforce.Protect, s *protectsScanner, ignoreRulesWithHost bool) error { - logger = logger.Scoped("scanProtects", "") + logger = logger.Scoped("scanProtects") for _, protect := range protects { // skip any rule that relies on particular client IP addresses or hostnames // this is the initial approach to address wrong behaviors @@ -295,7 +295,7 @@ func repoIncludesExcludesScanner(perms *authz.ExternalUserPermissions) *protects // fullRepoPermsScanner converts `p4 protects` to a 1:1 implementation of Sourcegraph // authorization, including sub-repo perms and exact depot-as-repo matches. func fullRepoPermsScanner(logger log.Logger, perms *authz.ExternalUserPermissions, configuredDepots []extsvc.RepoID) *protectsScanner { - logger = logger.Scoped("fullRepoPermsScanner", "") + logger = logger.Scoped("fullRepoPermsScanner") // Get glob equivalents of all depots var configuredDepotMatches []globMatch for _, depot := range configuredDepots { @@ -450,7 +450,7 @@ func trimDepotNameAndSlashes(s, depotName string) string { } func convertRulesForWildcardDepotMatch(match globMatch, depot extsvc.RepoID, patternsToGlob map[string]globMatch) []string { - logger := log.Scoped("convertRulesForWildcardDepotMatch", "") + logger := log.Scoped("convertRulesForWildcardDepotMatch") if !strings.Contains(match.pattern, "**") && !strings.Contains(match.pattern, "*") { return []string{match.pattern} } @@ -496,7 +496,7 @@ func convertRulesForWildcardDepotMatch(match globMatch, depot extsvc.RepoID, pat // allUsersScanner converts `p4 protects` to a map of users within the protection rules. 
func allUsersScanner(ctx context.Context, p *Provider, users map[string]struct{}) *protectsScanner { - logger := log.Scoped("allUsersScanner", "") + logger := log.Scoped("allUsersScanner") return &protectsScanner{ processLine: func(line p4ProtectLine) error { if line.isExclusion { diff --git a/internal/batches/background.go b/internal/batches/background.go index 22514138f34..cf9464739fb 100644 --- a/internal/batches/background.go +++ b/internal/batches/background.go @@ -30,7 +30,7 @@ func InitBackgroundJobs( // host, we manually check for BatchChangesCredentials. ctx = actor.WithInternalActor(ctx) - observationCtx := observation.NewContext(log.Scoped("batches.background", "batches background jobs")) + observationCtx := observation.NewContext(log.Scoped("batches.background")) bstore := store.New(db, observationCtx, key) syncRegistry := syncer.NewSyncRegistry(ctx, observationCtx, bstore, cf) diff --git a/internal/batches/reconciler/executor.go b/internal/batches/reconciler/executor.go index 0f89ba6a208..a81646489ff 100644 --- a/internal/batches/reconciler/executor.go +++ b/internal/batches/reconciler/executor.go @@ -31,7 +31,7 @@ import ( func executePlan(ctx context.Context, logger log.Logger, client gitserver.Client, sourcer sources.Sourcer, noSleepBeforeSync bool, tx *store.Store, plan *Plan) (afterDone func(store *store.Store), err error) { e := &executor{ client: client, - logger: logger.Scoped("executor", "An executor for a single Batch Changes reconciler plan"), + logger: logger.Scoped("executor"), sourcer: sourcer, noSleepBeforeSync: noSleepBeforeSync, tx: tx, diff --git a/internal/batches/service/service.go b/internal/batches/service/service.go index 7f729c6ae7e..8c2646c597d 100644 --- a/internal/batches/service/service.go +++ b/internal/batches/service/service.go @@ -49,12 +49,12 @@ func New(store *store.Store) *Service { // NewWithClock returns a Service the given clock used // to generate timestamps. 
func NewWithClock(store *store.Store, clock func() time.Time) *Service { - logger := sglog.Scoped("batches.Service", "batch changes service") + logger := sglog.Scoped("batches.Service") svc := &Service{ logger: logger, store: store, sourcer: sources.NewSourcer(httpcli.NewExternalClientFactory( - httpcli.NewLoggingMiddleware(logger.Scoped("sourcer", "batches sourcer")), + httpcli.NewLoggingMiddleware(logger.Scoped("sourcer")), )), clock: clock, operations: newOperations(store.ObservationCtx()), diff --git a/internal/batches/service/workspace_resolver.go b/internal/batches/service/workspace_resolver.go index dd2c14c4fe8..0ab15d3f347 100644 --- a/internal/batches/service/workspace_resolver.go +++ b/internal/batches/service/workspace_resolver.go @@ -75,7 +75,7 @@ type WorkspaceResolverBuilder func(tx *store.Store) WorkspaceResolver func NewWorkspaceResolver(s *store.Store) WorkspaceResolver { return &workspaceResolver{ store: s, - logger: log.Scoped("batches.workspaceResolver", "The batch changes execution workspace resolver"), + logger: log.Scoped("batches.workspaceResolver"), gitserverClient: gitserver.NewClient(), frontendInternalURL: internalapi.Client.URL + "/.internal", } diff --git a/internal/batches/sources/sources.go b/internal/batches/sources/sources.go index 03641c65182..908114bdacb 100644 --- a/internal/batches/sources/sources.go +++ b/internal/batches/sources/sources.go @@ -130,7 +130,7 @@ type sourcer struct { func newSourcer(cf *httpcli.Factory, csf changesetSourceFactory) Sourcer { return &sourcer{ - logger: log.Scoped("sourcer", "logger scoped to sources.sourcer"), + logger: log.Scoped("sourcer"), cf: cf, newSource: csf, } diff --git a/internal/batches/state/state.go b/internal/batches/state/state.go index 499c578af20..c2b88356f8f 100644 --- a/internal/batches/state/state.go +++ b/internal/batches/state/state.go @@ -41,7 +41,7 @@ func SetDerivedState(ctx context.Context, repoStore database.RepoStore, client g copy(events, es) sort.Sort(events) - logger := log.Scoped("SetDerivedState", "") + logger := log.Scoped("SetDerivedState") // We need to ensure we're using an internal actor here, since we need to // have access to the repo to set the derived state regardless of the actor diff --git a/internal/batches/store/worker_workspace_execution.go b/internal/batches/store/worker_workspace_execution.go index fefbd1aefca..3d70e851753 100644 --- a/internal/batches/store/worker_workspace_execution.go +++ b/internal/batches/store/worker_workspace_execution.go @@ -59,7 +59,7 @@ func NewBatchSpecWorkspaceExecutionWorkerStore(observationCtx *observation.Conte return &batchSpecWorkspaceExecutionWorkerStore{ Store: dbworkerstore.New(observationCtx, handle, batchSpecWorkspaceExecutionWorkerStoreOptions), observationCtx: observationCtx, - logger: log.Scoped("batch-spec-workspace-execution-worker-store", "The worker store backing the executor queue for Batch Changes"), + logger: log.Scoped("batch-spec-workspace-execution-worker-store"), } } diff --git a/internal/batches/syncer/syncer.go b/internal/batches/syncer/syncer.go index 8ecd327dcd4..8be132db9bd 100644 --- a/internal/batches/syncer/syncer.go +++ b/internal/batches/syncer/syncer.go @@ -54,7 +54,7 @@ var ( // NewSyncRegistry creates a new sync registry which starts a syncer for each code host and will update them // when external services are changed, added or removed. 
func NewSyncRegistry(ctx context.Context, observationCtx *observation.Context, bstore SyncStore, cf *httpcli.Factory) *SyncRegistry { - logger := observationCtx.Logger.Scoped("SyncRegistry", "starts a syncer for each code host and updates them") + logger := observationCtx.Logger.Scoped("SyncRegistry") ctx, cancel := context.WithCancel(ctx) return &SyncRegistry{ ctx: ctx, diff --git a/internal/codeintel/autoindexing/internal/background/dependencies/job_resetters.go b/internal/codeintel/autoindexing/internal/background/dependencies/job_resetters.go index ee384b361a0..c4b414702d8 100644 --- a/internal/codeintel/autoindexing/internal/background/dependencies/job_resetters.go +++ b/internal/codeintel/autoindexing/internal/background/dependencies/job_resetters.go @@ -14,7 +14,7 @@ import ( // records that are marked as being processed but are no longer being processed // by a worker. func NewIndexResetter(logger log.Logger, interval time.Duration, store dbworkerstore.Store[uploadsshared.Index], metrics *resetterMetrics) *dbworker.Resetter[uploadsshared.Index] { - return dbworker.NewResetter(logger.Scoped("indexResetter", ""), store, dbworker.ResetterOptions{ + return dbworker.NewResetter(logger.Scoped("indexResetter"), store, dbworker.ResetterOptions{ Name: "precise_code_intel_index_worker_resetter", Interval: interval, Metrics: dbworker.ResetterMetrics{ @@ -29,7 +29,7 @@ func NewIndexResetter(logger log.Logger, interval time.Duration, store dbworkers // dependency index records that are marked as being processed but are no longer being // processed by a worker. func NewDependencyIndexResetter(logger log.Logger, interval time.Duration, store dbworkerstore.Store[dependencyIndexingJob], metrics *resetterMetrics) *dbworker.Resetter[dependencyIndexingJob] { - return dbworker.NewResetter(logger.Scoped("dependencyIndexResetter", ""), store, dbworker.ResetterOptions{ + return dbworker.NewResetter(logger.Scoped("dependencyIndexResetter"), store, dbworker.ResetterOptions{ Name: "precise_code_intel_dependency_index_worker_resetter", Interval: interval, Metrics: dbworker.ResetterMetrics{ diff --git a/internal/codeintel/autoindexing/internal/background/init.go b/internal/codeintel/autoindexing/internal/background/init.go index a0eb88d8b6c..9b8df059dd6 100644 --- a/internal/codeintel/autoindexing/internal/background/init.go +++ b/internal/codeintel/autoindexing/internal/background/init.go @@ -89,8 +89,8 @@ func NewDependencyIndexSchedulers( config, ), - dependencies.NewIndexResetter(observationCtx.Logger.Scoped("indexResetter", ""), config.ResetterInterval, indexStore, metrics), - dependencies.NewDependencyIndexResetter(observationCtx.Logger.Scoped("dependencyIndexResetter", ""), config.ResetterInterval, dependencyIndexingStore, metrics), + dependencies.NewIndexResetter(observationCtx.Logger.Scoped("indexResetter"), config.ResetterInterval, indexStore, metrics), + dependencies.NewDependencyIndexResetter(observationCtx.Logger.Scoped("dependencyIndexResetter"), config.ResetterInterval, dependencyIndexingStore, metrics), } } diff --git a/internal/codeintel/autoindexing/internal/inference/infer.go b/internal/codeintel/autoindexing/internal/inference/infer.go index 57fb2020a86..f788e336279 100644 --- a/internal/codeintel/autoindexing/internal/inference/infer.go +++ b/internal/codeintel/autoindexing/internal/inference/infer.go @@ -62,7 +62,7 @@ func inferNpmRepositoryAndRevision(pkg dependencies.MinimialVersionedPackageRepo return "", "", false } - logger := log.Scoped("inferNpmRepositoryAndRevision", "") + logger 
:= log.Scoped("inferNpmRepositoryAndRevision") npmPkg, err := reposource.ParseNpmPackageFromPackageSyntax(pkg.Name) if err != nil { logger.Error("invalid npm package name in database", log.Error(err)) diff --git a/internal/codeintel/autoindexing/internal/inference/init.go b/internal/codeintel/autoindexing/internal/inference/init.go index 6e6c74ec40d..3de9dff96dc 100644 --- a/internal/codeintel/autoindexing/internal/inference/init.go +++ b/internal/codeintel/autoindexing/internal/inference/init.go @@ -19,7 +19,7 @@ var ( ) func NewService(db database.DB) *Service { - observationCtx := observation.NewContext(log.Scoped("inference.service", "inference service")) + observationCtx := observation.NewContext(log.Scoped("inference.service")) return newService( observationCtx, diff --git a/internal/codeintel/autoindexing/internal/jobselector/job_selector.go b/internal/codeintel/autoindexing/internal/jobselector/job_selector.go index 0540cb2ab0d..b0a8b5be19e 100644 --- a/internal/codeintel/autoindexing/internal/jobselector/job_selector.go +++ b/internal/codeintel/autoindexing/internal/jobselector/job_selector.go @@ -123,7 +123,7 @@ func (s *JobSelector) GetIndexRecords(ctx context.Context, repositoryID int, com // explicitly via a GraphQL query parameter. If no configuration was supplield then a false valued // flag is returned. func makeExplicitConfigurationFactory(configuration string) configurationFactoryFunc { - logger := log.Scoped("explicitConfigurationFactory", "") + logger := log.Scoped("explicitConfigurationFactory") return func(ctx context.Context, repositoryID int, commit string, _ bool) ([]uploadsshared.Index, bool, error) { if configuration == "" { return nil, false, nil diff --git a/internal/codeintel/autoindexing/internal/store/store.go b/internal/codeintel/autoindexing/internal/store/store.go index 685bb0861ce..789da8966c8 100644 --- a/internal/codeintel/autoindexing/internal/store/store.go +++ b/internal/codeintel/autoindexing/internal/store/store.go @@ -63,7 +63,7 @@ type store struct { func New(observationCtx *observation.Context, db database.DB) Store { return &store{ db: basestore.NewWithHandle(db.Handle()), - logger: logger.Scoped("autoindexing.store", ""), + logger: logger.Scoped("autoindexing.store"), operations: newOperations(observationCtx), } } diff --git a/internal/codeintel/autoindexing/service.go b/internal/codeintel/autoindexing/service.go index 593b16d75dc..f340f216c6d 100644 --- a/internal/codeintel/autoindexing/service.go +++ b/internal/codeintel/autoindexing/service.go @@ -50,7 +50,7 @@ func newService( repoStore, inferenceSvc, gitserverClient, - log.Scoped("autoindexing job selector", ""), + log.Scoped("autoindexing job selector"), ) indexEnqueuer := enqueuer.NewIndexEnqueuer( diff --git a/internal/codeintel/codenav/service.go b/internal/codeintel/codenav/service.go index 4ba1b805abb..242d948801d 100644 --- a/internal/codeintel/codenav/service.go +++ b/internal/codeintel/codenav/service.go @@ -46,7 +46,7 @@ func newService( gitserver: gitserver, uploadSvc: uploadSvc, operations: newOperations(observationCtx), - logger: log.Scoped("codenav", ""), + logger: log.Scoped("codenav"), } } diff --git a/internal/codeintel/context/internal/store/store.go b/internal/codeintel/context/internal/store/store.go index e1f8045d23a..c119d65cff4 100644 --- a/internal/codeintel/context/internal/store/store.go +++ b/internal/codeintel/context/internal/store/store.go @@ -21,7 +21,7 @@ type store struct { func New(observationCtx *observation.Context, db database.DB) Store { return 
&store{ db: basestore.NewWithHandle(db.Handle()), - logger: logger.Scoped("context.store", ""), + logger: logger.Scoped("context.store"), operations: newOperations(observationCtx), } } diff --git a/internal/codeintel/policies/internal/store/store.go b/internal/codeintel/policies/internal/store/store.go index fbca3d8b5ed..71021c19767 100644 --- a/internal/codeintel/policies/internal/store/store.go +++ b/internal/codeintel/policies/internal/store/store.go @@ -38,7 +38,7 @@ type store struct { func New(observationCtx *observation.Context, db database.DB) Store { return &store{ db: basestore.NewWithHandle(db.Handle()), - logger: logger.Scoped("policies.store", ""), + logger: logger.Scoped("policies.store"), operations: newOperations(observationCtx), } } diff --git a/internal/codeintel/ranking/internal/store/store.go b/internal/codeintel/ranking/internal/store/store.go index 633bc0abb0e..5c84aede678 100644 --- a/internal/codeintel/ranking/internal/store/store.go +++ b/internal/codeintel/ranking/internal/store/store.go @@ -68,7 +68,7 @@ type store struct { func New(observationCtx *observation.Context, db database.DB) Store { return &store{ db: basestore.NewWithHandle(db.Handle()), - logger: logger.Scoped("ranking.store", ""), + logger: logger.Scoped("ranking.store"), operations: newOperations(observationCtx), } } diff --git a/internal/codeintel/sentinel/internal/background/downloader/job.go b/internal/codeintel/sentinel/internal/background/downloader/job.go index 0c235af533d..af47b918fe3 100644 --- a/internal/codeintel/sentinel/internal/background/downloader/job.go +++ b/internal/codeintel/sentinel/internal/background/downloader/job.go @@ -15,7 +15,7 @@ import ( func NewCVEDownloader(store store.Store, observationCtx *observation.Context, config *Config) goroutine.BackgroundRoutine { cveParser := &CVEParser{ store: store, - logger: log.Scoped("sentinel.parser", ""), + logger: log.Scoped("sentinel.parser"), } metrics := newMetrics(observationCtx) @@ -48,7 +48,7 @@ type CVEParser struct { func NewCVEParser() *CVEParser { return &CVEParser{ - logger: log.Scoped("sentinel.parser", ""), + logger: log.Scoped("sentinel.parser"), } } diff --git a/internal/codeintel/sentinel/internal/store/store.go b/internal/codeintel/sentinel/internal/store/store.go index f8b533272a3..d4b1f93dbb3 100644 --- a/internal/codeintel/sentinel/internal/store/store.go +++ b/internal/codeintel/sentinel/internal/store/store.go @@ -35,7 +35,7 @@ type store struct { func New(observationCtx *observation.Context, db database.DB) Store { return &store{ db: basestore.NewWithHandle(db.Handle()), - logger: logger.Scoped("sentinel.store", ""), + logger: logger.Scoped("sentinel.store"), operations: newOperations(observationCtx), } } diff --git a/internal/codeintel/uploads/internal/background/processor/job_resetters.go b/internal/codeintel/uploads/internal/background/processor/job_resetters.go index bd721cce48b..653091516ac 100644 --- a/internal/codeintel/uploads/internal/background/processor/job_resetters.go +++ b/internal/codeintel/uploads/internal/background/processor/job_resetters.go @@ -14,7 +14,7 @@ import ( // records that are marked as being processed but are no longer being processed // by a worker. 
func NewUploadResetter(logger log.Logger, store store.Store[shared.Upload], metrics *resetterMetrics) *dbworker.Resetter[shared.Upload] { - return dbworker.NewResetter(logger.Scoped("uploadResetter", ""), store, dbworker.ResetterOptions{ + return dbworker.NewResetter(logger.Scoped("uploadResetter"), store, dbworker.ResetterOptions{ Name: "precise_code_intel_upload_worker_resetter", Interval: 30 * time.Second, Metrics: dbworker.ResetterMetrics{ diff --git a/internal/codeintel/uploads/internal/store/cleanup_test.go b/internal/codeintel/uploads/internal/store/cleanup_test.go index 88cf38ef3f7..5c2d6838c0e 100644 --- a/internal/codeintel/uploads/internal/store/cleanup_test.go +++ b/internal/codeintel/uploads/internal/store/cleanup_test.go @@ -224,7 +224,7 @@ func TestProcessStaleSourcedCommits(t *testing.T) { db := database.NewDB(log, sqlDB) store := &store{ db: basestore.NewWithHandle(db.Handle()), - logger: logger.Scoped("autoindexing.store", ""), + logger: logger.Scoped("autoindexing.store"), operations: newOperations(&observation.TestContext), } diff --git a/internal/codeintel/uploads/internal/store/store.go b/internal/codeintel/uploads/internal/store/store.go index f215bf7c4c6..4e4397cb5d0 100644 --- a/internal/codeintel/uploads/internal/store/store.go +++ b/internal/codeintel/uploads/internal/store/store.go @@ -116,7 +116,7 @@ type store struct { func New(observationCtx *observation.Context, db database.DB) Store { return &store{ - logger: logger.Scoped("uploads.store", ""), + logger: logger.Scoped("uploads.store"), db: basestore.NewWithHandle(db.Handle()), operations: newOperations(observationCtx), } diff --git a/internal/codeintel/uploads/transport/http/auth/github.go b/internal/codeintel/uploads/transport/http/auth/github.go index 04e46cd566b..07017ef5a6d 100644 --- a/internal/codeintel/uploads/transport/http/auth/github.go +++ b/internal/codeintel/uploads/transport/http/auth/github.go @@ -56,7 +56,7 @@ func enforceAuthViaGitHub(ctx context.Context, query url.Values, repoName string var _ AuthValidator = enforceAuthViaGitHub func uncachedEnforceAuthViaGitHub(ctx context.Context, githubToken, repoName string) (int, error) { - logger := log.Scoped("uncachedEnforceAuthViaGitHub", "uncached authentication enforcement") + logger := log.Scoped("uncachedEnforceAuthViaGitHub") ghClient := github.NewV3Client(logger, extsvc.URNCodeIntel, githubURL, &auth.OAuthBearerToken{Token: githubToken}, nil) diff --git a/internal/codeintel/uploads/transport/http/handler.go b/internal/codeintel/uploads/transport/http/handler.go index 8a4c52cb6b7..1265ddf2b91 100644 --- a/internal/codeintel/uploads/transport/http/handler.go +++ b/internal/codeintel/uploads/transport/http/handler.go @@ -25,7 +25,7 @@ func newHandler( dbStore uploadhandler.DBStore[uploads.UploadMetadata], operations *uploadhandler.Operations, ) http.Handler { - logger := log.Scoped("UploadHandler", "") + logger := log.Scoped("UploadHandler") metadataFromRequest := func(ctx context.Context, r *http.Request) (uploads.UploadMetadata, int, error) { commit := getQuery(r, "commit") diff --git a/internal/codeintel/uploads/transport/http/init.go b/internal/codeintel/uploads/transport/http/init.go index 605d1998350..22cf2a5fe4c 100644 --- a/internal/codeintel/uploads/transport/http/init.go +++ b/internal/codeintel/uploads/transport/http/init.go @@ -26,7 +26,6 @@ func GetHandler(svc *uploads.Service, db database.DB, gitserverClient gitserver. 
handlerOnce.Do(func() { logger := log.Scoped( "uploads.handler", - "codeintel uploads http handler", ) observationCtx := observation.NewContext(logger) diff --git a/internal/codemonitors/background/background.go b/internal/codemonitors/background/background.go index a293dfd6089..8fff5fa9734 100644 --- a/internal/codemonitors/background/background.go +++ b/internal/codemonitors/background/background.go @@ -9,7 +9,7 @@ import ( ) func NewBackgroundJobs(observationCtx *observation.Context, db database.DB) []goroutine.BackgroundRoutine { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("BackgroundJobs", "code monitors background jobs"), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("BackgroundJobs"), observationCtx) codeMonitorsStore := db.CodeMonitors() @@ -30,5 +30,5 @@ func NewBackgroundJobs(observationCtx *observation.Context, db database.DB) []go } func scopedContext(operation string, parent *observation.Context) *observation.Context { - return observation.ContextWithLogger(parent.Logger.Scoped(operation, ""), parent) + return observation.ContextWithLogger(parent.Logger.Scoped(operation), parent) } diff --git a/internal/codemonitors/background/metrics.go b/internal/codemonitors/background/metrics.go index e02e502c3bc..0a6307ff162 100644 --- a/internal/codemonitors/background/metrics.go +++ b/internal/codemonitors/background/metrics.go @@ -15,7 +15,7 @@ type codeMonitorsMetrics struct { } func newMetricsForTriggerQueries(observationCtx *observation.Context) codeMonitorsMetrics { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("triggers", "code monitor triggers"), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("triggers"), observationCtx) resetFailures := prometheus.NewCounter(prometheus.CounterOpts{ Name: "src_codemonitors_query_reset_failures_total", @@ -44,7 +44,7 @@ func newMetricsForTriggerQueries(observationCtx *observation.Context) codeMonito } func newActionMetrics(observationCtx *observation.Context) codeMonitorsMetrics { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("actions", "code monitors actions"), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("actions"), observationCtx) resetFailures := prometheus.NewCounter(prometheus.CounterOpts{ Name: "src_codemonitors_action_reset_failures_total", diff --git a/internal/codemonitors/background/workers.go b/internal/codemonitors/background/workers.go index 393a963013b..5dc4d605823 100644 --- a/internal/codemonitors/background/workers.go +++ b/internal/codemonitors/background/workers.go @@ -123,7 +123,7 @@ func newActionJobResetter(_ context.Context, observationCtx *observation.Context } func createDBWorkerStoreForTriggerJobs(observationCtx *observation.Context, s basestore.ShareableStore) dbworkerstore.Store[*database.TriggerJob] { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("triggerJobs.dbworker.Store", ""), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("triggerJobs.dbworker.Store"), observationCtx) return dbworkerstore.New(observationCtx, s.Handle(), dbworkerstore.Options[*database.TriggerJob]{ Name: "code_monitors_trigger_jobs_worker_store", @@ -138,7 +138,7 @@ func createDBWorkerStoreForTriggerJobs(observationCtx *observation.Context, s ba } func createDBWorkerStoreForActionJobs(observationCtx *observation.Context, s 
database.CodeMonitorStore) dbworkerstore.Store[*database.ActionJob] { - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("actionJobs.dbworker.Store", ""), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("actionJobs.dbworker.Store"), observationCtx) return dbworkerstore.New(observationCtx, s.Handle(), dbworkerstore.Options[*database.ActionJob]{ Name: "code_monitors_action_jobs_worker_store", @@ -278,7 +278,7 @@ func (r *actionRunner) handleEmail(ctx context.Context, j *database.ActionJob) e if rec.NamespaceUserID == nil { return errors.New("nil recipient") } - err = SendEmailForNewSearchResult(ctx, database.NewDBWith(log.Scoped("handleEmail", ""), r.CodeMonitorStore), *rec.NamespaceUserID, data) + err = SendEmailForNewSearchResult(ctx, database.NewDBWith(log.Scoped("handleEmail"), r.CodeMonitorStore), *rec.NamespaceUserID, data) if err != nil { return err } diff --git a/internal/completions/client/observe.go b/internal/completions/client/observe.go index 2364f6ce249..d5475a1ad38 100644 --- a/internal/completions/client/observe.go +++ b/internal/completions/client/observe.go @@ -13,7 +13,7 @@ import ( ) func newObservedClient(inner types.CompletionsClient) *observedClient { - observationCtx := observation.NewContext(log.Scoped("completions", "completions client")) + observationCtx := observation.NewContext(log.Scoped("completions")) ops := newOperations(observationCtx) return &observedClient{ inner: inner, diff --git a/internal/completions/httpapi/chat.go b/internal/completions/httpapi/chat.go index 25f73e1b6ba..f70e73fb4ad 100644 --- a/internal/completions/httpapi/chat.go +++ b/internal/completions/httpapi/chat.go @@ -13,7 +13,7 @@ import ( // NewChatCompletionsStreamHandler is an http handler which streams back completions results. func NewChatCompletionsStreamHandler(logger log.Logger, db database.DB) http.Handler { - logger = logger.Scoped("chat", "chat completions handler") + logger = logger.Scoped("chat") rl := NewRateLimiter(db, redispool.Store, types.CompletionsFeatureChat) return newCompletionsHandler( diff --git a/internal/completions/httpapi/codecompletion.go b/internal/completions/httpapi/codecompletion.go index 6e821cac823..2582160a79e 100644 --- a/internal/completions/httpapi/codecompletion.go +++ b/internal/completions/httpapi/codecompletion.go @@ -15,7 +15,7 @@ import ( // NewCodeCompletionsHandler is an http handler which sends back code completion results. 
func NewCodeCompletionsHandler(logger log.Logger, db database.DB) http.Handler { - logger = logger.Scoped("code", "code completions handler") + logger = logger.Scoped("code") rl := NewRateLimiter(db, redispool.Store, types.CompletionsFeatureCode) return newCompletionsHandler( logger, diff --git a/internal/compute/output_command.go b/internal/compute/output_command.go index feb6b56369b..af6cdf9dbf7 100644 --- a/internal/compute/output_command.go +++ b/internal/compute/output_command.go @@ -137,7 +137,7 @@ func (c *Output) Run(ctx context.Context, _ gitserver.Client, r result.Match) (R return nil, err } - textResult, err := toTextResult(ctx, log.Scoped("compute", ""), content, c.SearchPattern, outputPattern, c.Separator, c.Selector) + textResult, err := toTextResult(ctx, log.Scoped("compute"), content, c.SearchPattern, outputPattern, c.Separator, c.Selector) if err != nil { return nil, err } diff --git a/internal/compute/replace_command.go b/internal/compute/replace_command.go index 7cb02300f87..552121e480a 100644 --- a/internal/compute/replace_command.go +++ b/internal/compute/replace_command.go @@ -31,7 +31,7 @@ func replace(ctx context.Context, content []byte, matchPattern MatchPattern, rep case *Regexp: newContent = match.Value.ReplaceAllString(string(content), replacePattern) case *Comby: - replacements, err := comby.Replacements(ctx, log.Scoped("compute", ""), comby.Args{ + replacements, err := comby.Replacements(ctx, log.Scoped("compute"), comby.Args{ Input: comby.FileContent(content), MatchTemplate: match.Value, RewriteTemplate: replacePattern, diff --git a/internal/conf/client.go b/internal/conf/client.go index 7809ca9daf4..1420c1cfb4f 100644 --- a/internal/conf/client.go +++ b/internal/conf/client.go @@ -239,7 +239,7 @@ func (c *client) continuouslyUpdate(optOnlySetByTests *continuousUpdateOptions) // database in most cases, to avoid log spam when running sourcegraph/server for the // first time. delayBeforeUnreachableLog: 15 * time.Second, - logger: log.Scoped("conf.client", "configuration client"), + logger: log.Scoped("conf.client"), sleepBetweenUpdates: func() { jitter := time.Duration(rand.Int63n(5 * int64(time.Second))) time.Sleep(jitter) diff --git a/internal/conf/conf.go b/internal/conf/conf.go index c77634fc45b..fc4e75416f4 100644 --- a/internal/conf/conf.go +++ b/internal/conf/conf.go @@ -232,7 +232,7 @@ func startSiteConfigEscapeHatchWorker(c ConfigurationSource) { ctx = context.Background() lastKnownFileContents, lastKnownDBContents string lastKnownConfigID int32 - logger = sglog.Scoped("SiteConfigEscapeHatch", "escape hatch for site config").With(sglog.String("path", siteConfigEscapeHatchPath)) + logger = sglog.Scoped("SiteConfigEscapeHatch").With(sglog.String("path", siteConfigEscapeHatchPath)) ) go func() { // First, ensure we populate the file with what is currently in the DB. 
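Every hunk in this patch makes the same mechanical change: Scoped loses its free-form description argument and keeps only the scope name. The following Go sketch (illustrative only, not part of the diff; it assumes the github.com/sourcegraph/log module imported as log/sglog throughout this patch, and the function name and path value are made up) shows the before/after call shape, mirroring the conf.go hunk directly above, where context that still matters moves into structured fields:

    // Illustrative sketch only; not part of the diff. Assumes the
    // github.com/sourcegraph/log module used throughout this patch.
    package example

    import "github.com/sourcegraph/log"

    // newEscapeHatchLogger mirrors the conf.go hunk above: Scoped now takes a
    // single name, and context that used to live in the description string is
    // carried as structured fields instead. The function name and path value
    // are made up for illustration.
    func newEscapeHatchLogger(parent log.Logger, path string) log.Logger {
        // Before: parent.Scoped("SiteConfigEscapeHatch", "escape hatch for site config")
        // After:  only the scope name remains.
        return parent.Scoped("SiteConfigEscapeHatch").With(log.String("path", path))
    }

The same shape applies to the package-level log.Scoped calls in the other hunks.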
diff --git a/internal/database/assigned_owners.go b/internal/database/assigned_owners.go index 944bebb9d1d..77f7c6bcb4a 100644 --- a/internal/database/assigned_owners.go +++ b/internal/database/assigned_owners.go @@ -29,7 +29,7 @@ type AssignedOwnerSummary struct { } func AssignedOwnersStoreWith(other basestore.ShareableStore, logger log.Logger) AssignedOwnersStore { - lgr := logger.Scoped("AssignedOwnersStore", "Store for a table containing manually assigned code owners") + lgr := logger.Scoped("AssignedOwnersStore") return &assignedOwnersStore{Store: basestore.NewWithHandle(other.Handle()), Logger: lgr} } diff --git a/internal/database/assigned_teams.go b/internal/database/assigned_teams.go index 678ce25db05..11e72833808 100644 --- a/internal/database/assigned_teams.go +++ b/internal/database/assigned_teams.go @@ -27,7 +27,7 @@ type AssignedTeamSummary struct { } func AssignedTeamsStoreWith(other basestore.ShareableStore, logger log.Logger) AssignedTeamsStore { - lgr := logger.Scoped("AssignedTeamsStore", "Store for a table containing manually assigned team code owners") + lgr := logger.Scoped("AssignedTeamsStore") return &assignedTeamsStore{Store: basestore.NewWithHandle(other.Handle()), Logger: lgr} } diff --git a/internal/database/basestore/handle.go b/internal/database/basestore/handle.go index 075ef28f00a..0c90ef10b9b 100644 --- a/internal/database/basestore/handle.go +++ b/internal/database/basestore/handle.go @@ -59,7 +59,7 @@ var ( func NewHandleWithDB(logger log.Logger, db *sql.DB, txOptions sql.TxOptions) TransactableHandle { return &dbHandle{ DB: db, - logger: logger.Scoped("db-handle", "internal database"), + logger: logger.Scoped("db-handle"), txOptions: txOptions, } } @@ -69,7 +69,7 @@ func NewHandleWithTx(tx *sql.Tx, txOptions sql.TxOptions) TransactableHandle { return &txHandle{ lockingTx: &lockingTx{ tx: tx, - logger: log.Scoped("db-handle", "internal database"), + logger: log.Scoped("db-handle"), }, txOptions: txOptions, } diff --git a/internal/database/batch/batch.go b/internal/database/batch/batch.go index 89e68625e21..8a700b56810 100644 --- a/internal/database/batch/batch.go +++ b/internal/database/batch/batch.go @@ -175,7 +175,7 @@ func NewInserterWithReturn( querySuffix := makeQuerySuffix(numColumns, maxNumParameters) onConflictSuffix := makeOnConflictSuffix(onConflictClause) returningSuffix := makeReturningSuffix(returningColumnNames) - logger := sglog.Scoped("Inserter", "") + logger := sglog.Scoped("Inserter") return &Inserter{ db: db, diff --git a/internal/database/code_monitors.go b/internal/database/code_monitors.go index 39dc0af9a15..b31113d90c0 100644 --- a/internal/database/code_monitors.go +++ b/internal/database/code_monitors.go @@ -112,7 +112,7 @@ func CodeMonitorsWith(other basestore.ShareableStore) *codeMonitorStore { // clock for timestamps. func CodeMonitorsWithClock(other basestore.ShareableStore, clock func() time.Time) *codeMonitorStore { handle := basestore.NewWithHandle(other.Handle()) - return &codeMonitorStore{Store: handle, userStore: UsersWith(log.Scoped("codemonitors", ""), handle), now: clock} + return &codeMonitorStore{Store: handle, userStore: UsersWith(log.Scoped("codemonitors"), handle), now: clock} } // Clock returns the clock of the underlying store. 
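The database store constructors above (AssignedOwnersStoreWith, AssignedTeamsStoreWith, NewHandleWithDB, CodeMonitorsWithClock) all follow one pattern after this change: narrow the caller's logger with a name-only Scoped call and pass it to the store next to the shared handle. A sketch of that pattern with a hypothetical WidgetStore (illustrative only; the basestore and log calls are the ones shown in the hunks above):

    // Illustrative sketch only; not part of the diff. WidgetStore and its
    // constructor are hypothetical; the basestore and log calls are the ones
    // shown in the store hunks above.
    package example

    import (
        "github.com/sourcegraph/log"
        "github.com/sourcegraph/sourcegraph/internal/database/basestore"
    )

    type widgetStore struct {
        *basestore.Store
        logger log.Logger
    }

    // WidgetStoreWith follows AssignedOwnersStoreWith and friends after the
    // change: derive a name-only scoped child logger from the caller's logger
    // and hand it to the store together with the shared handle.
    func WidgetStoreWith(other basestore.ShareableStore, logger log.Logger) *widgetStore {
        return &widgetStore{
            Store:  basestore.NewWithHandle(other.Handle()),
            logger: logger.Scoped("WidgetStore"), // description argument removed by this patch
        }
    }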
diff --git a/internal/database/conf.go b/internal/database/conf.go index 0f8e317f7c2..18181852e1d 100644 --- a/internal/database/conf.go +++ b/internal/database/conf.go @@ -67,7 +67,7 @@ var ErrNewerEdit = errors.New("someone else has already applied a newer edit") func ConfStoreWith(other basestore.ShareableStore) ConfStore { return &confStore{ Store: basestore.NewWithHandle(other.Handle()), - logger: log.Scoped("confStore", "database confStore"), + logger: log.Scoped("confStore"), } } diff --git a/internal/database/database.go b/internal/database/database.go index ea791698f1d..14fa78adff0 100644 --- a/internal/database/database.go +++ b/internal/database/database.go @@ -135,11 +135,11 @@ func (d *db) Done(err error) error { } func (d *db) AccessTokens() AccessTokenStore { - return AccessTokensWith(d.Store, d.logger.Scoped("AccessTokenStore", "")) + return AccessTokensWith(d.Store, d.logger.Scoped("AccessTokenStore")) } func (d *db) AccessRequests() AccessRequestStore { - return AccessRequestsWith(d.Store, d.logger.Scoped("AccessRequestStore", "")) + return AccessRequestsWith(d.Store, d.logger.Scoped("AccessRequestStore")) } func (d *db) BitbucketProjectPermissions() BitbucketProjectPermissionsStore { @@ -304,7 +304,7 @@ func (d *db) TemporarySettings() TemporarySettingsStore { func (d *db) TelemetryEventsExportQueue() TelemetryEventsExportQueueStore { return TelemetryEventsExportQueueWith( - d.logger.Scoped("telemetry_events", "telemetry events export queue store"), + d.logger.Scoped("telemetry_events"), d.Store, ) } diff --git a/internal/database/dbconn/rds/rds.go b/internal/database/dbconn/rds/rds.go index 085957bceb6..6ca4b2cc97a 100644 --- a/internal/database/dbconn/rds/rds.go +++ b/internal/database/dbconn/rds/rds.go @@ -26,7 +26,7 @@ func NewUpdater() *Updater { } func (u *Updater) ShouldUpdate(cfg *pgx.ConnConfig) bool { - logger := log.Scoped("rds", "shouldUpdate") + logger := log.Scoped("rds") token, err := parseRDSAuthToken(cfg.Password) if err != nil { logger.Warn("Error parsing RDS auth token, refreshing", log.Error(err)) @@ -37,7 +37,7 @@ func (u *Updater) ShouldUpdate(cfg *pgx.ConnConfig) bool { } func (u *Updater) Update(cfg *pgx.ConnConfig) (*pgx.ConnConfig, error) { - logger := log.Scoped("rds", "update") + logger := log.Scoped("rds") if cfg.Password != "" { // only output the warning once, or it will emit a new entry on every connection syncx.OnceFunc(func() { diff --git a/internal/database/migration/cliutil/addlog.go b/internal/database/migration/cliutil/addlog.go index e811664977c..5844faa0f31 100644 --- a/internal/database/migration/cliutil/addlog.go +++ b/internal/database/migration/cliutil/addlog.go @@ -35,7 +35,7 @@ func AddLog(commandName string, factory RunnerFactory, outFactory OutputFactory) schemaName = TranslateSchemaNames(schemaNameFlag.Get(cmd), out) versionFlag = versionFlag.Get(cmd) upFlag = upFlag.Get(cmd) - logger = log.Scoped("up", "migration up command") + logger = log.Scoped("up") ) store, err := setupStore(ctx, factory, schemaName) diff --git a/internal/database/migration/store/extractor.go b/internal/database/migration/store/extractor.go index a93d703bac2..09e04618b9e 100644 --- a/internal/database/migration/store/extractor.go +++ b/internal/database/migration/store/extractor.go @@ -31,7 +31,7 @@ func ExtractDatabase(ctx context.Context, r *runner.Runner) (database.DB, error) return nil, err } - return database.NewDB(log.Scoped("migrator", ""), db), nil + return database.NewDB(log.Scoped("migrator"), db), nil } func ExtractDB(ctx context.Context, 
r *runner.Runner, schemaName string) (*sql.DB, error) { diff --git a/internal/database/recent_view_signal.go b/internal/database/recent_view_signal.go index 53291e9bac1..0e9c4dcf7fe 100644 --- a/internal/database/recent_view_signal.go +++ b/internal/database/recent_view_signal.go @@ -54,7 +54,7 @@ type RecentViewSummary struct { } func RecentViewSignalStoreWith(other basestore.ShareableStore, logger log.Logger) RecentViewSignalStore { - lgr := logger.Scoped("RecentViewSignalStore", "Store for a table containing a number of views of a single file by a given viewer") + lgr := logger.Scoped("RecentViewSignalStore") return &recentViewSignalStore{Store: basestore.NewWithHandle(other.Handle()), Logger: lgr} } diff --git a/internal/database/security_event_logs.go b/internal/database/security_event_logs.go index 2de5d05ad69..c616dc4f2f7 100644 --- a/internal/database/security_event_logs.go +++ b/internal/database/security_event_logs.go @@ -111,7 +111,7 @@ type securityEventLogsStore struct { // SecurityEventLogsWith instantiates and returns a new SecurityEventLogsStore // using the other store handle, and a scoped sub-logger of the passed base logger. func SecurityEventLogsWith(baseLogger log.Logger, other basestore.ShareableStore) SecurityEventLogsStore { - logger := baseLogger.Scoped("SecurityEvents", "Security events store") + logger := baseLogger.Scoped("SecurityEvents") return &securityEventLogsStore{logger: logger, Store: basestore.NewWithHandle(other.Handle())} } diff --git a/internal/debugserver/grpcui.go b/internal/debugserver/grpcui.go index 34420b584ef..22167b4dbb4 100644 --- a/internal/debugserver/grpcui.go +++ b/internal/debugserver/grpcui.go @@ -22,7 +22,7 @@ const gRPCWebUIPath = "/debug/grpcui" // // serviceName is the name of the gRPC service that will be displayed on the debug page. func NewGRPCWebUIEndpoint(serviceName, target string) Endpoint { - logger := log.Scoped("gRPCWebUI", "HTTP handler for serving the gRPC Web UI explore page") + logger := log.Scoped("gRPCWebUI") var handler http.Handler = &grpcHandler{ target: target, diff --git a/internal/endpoint/endpoint.go b/internal/endpoint/endpoint.go index 57a9570d588..8c7feea3686 100644 --- a/internal/endpoint/endpoint.go +++ b/internal/endpoint/endpoint.go @@ -67,7 +67,7 @@ type endpoints struct { // Note: this function does not take a logger because discovery is done in the // in the background and does not connect to higher order functions. func New(urlspec string) *Map { - logger := log.Scoped("newmap", "A new map for the endpoing URL") + logger := log.Scoped("newmap") if !strings.HasPrefix(urlspec, "k8s+") { return Static(strings.Fields(urlspec)...) } @@ -203,7 +203,7 @@ func (m *Map) discover() { } func (m *Map) sync(ch chan endpoints, ready chan struct{}) { - logger := log.Scoped("endpoint", "A kubernetes endpoint that represents a service") + logger := log.Scoped("endpoint") for eps := range ch { logger.Info( diff --git a/internal/endpoint/k8s.go b/internal/endpoint/k8s.go index d29cd04a7ba..b992f009ade 100644 --- a/internal/endpoint/k8s.go +++ b/internal/endpoint/k8s.go @@ -24,7 +24,7 @@ import ( // K8S returns a Map for the given k8s urlspec (e.g. k8s+http://searcher), starting // service discovery in the background. 
func K8S(logger log.Logger, urlspec string) *Map { - logger = logger.Scoped("k8s", "service discovery via k8s") + logger = logger.Scoped("k8s") return &Map{ urlspec: urlspec, discofunk: k8sDiscovery(logger, urlspec, namespace(logger), loadClient), @@ -193,7 +193,7 @@ func parseURL(rawurl string) (*k8sURL, error) { // this is done because the k8s client we previously used set the namespace // when the client was created, the official k8s client does not func namespace(logger log.Logger) string { - logger = logger.Scoped("namespace", "A kubernetes namespace") + logger = logger.Scoped("namespace") const filename = "/var/run/secrets/kubernetes.io/serviceaccount/namespace" data, err := os.ReadFile(filename) if err != nil { diff --git a/internal/extsvc/azuredevops/client.go b/internal/extsvc/azuredevops/client.go index 2a82e0134e6..407f2e05c38 100644 --- a/internal/extsvc/azuredevops/client.go +++ b/internal/extsvc/azuredevops/client.go @@ -84,7 +84,7 @@ func NewClient(urn string, url string, auth auth.Authenticator, httpClient httpc return &client{ httpClient: httpClient, URL: u, - internalRateLimiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("AzureDevOpsClient", ""), urn)), + internalRateLimiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("AzureDevOpsClient"), urn)), externalRateLimiter: ratelimit.DefaultMonitorRegistry.GetOrSet(url, auth.Hash(), "rest", &ratelimit.Monitor{HeaderPrefix: "X-"}), auth: auth, urn: urn, @@ -133,7 +133,7 @@ func (c *client) do(ctx context.Context, req *http.Request, urlOverride string, _ = c.externalRateLimiter.WaitForRateLimit(ctx, 1) } - logger := log.Scoped("azuredevops.Client", "azuredevops Client logger") + logger := log.Scoped("azuredevops.Client") resp, err := oauthutil.DoRequest(ctx, logger, c.httpClient, req, c.auth, func(r *http.Request) (*http.Response, error) { return c.httpClient.Do(r) }) diff --git a/internal/extsvc/bitbucketcloud/client.go b/internal/extsvc/bitbucketcloud/client.go index 1118f5a77e6..ec857fc5b0f 100644 --- a/internal/extsvc/bitbucketcloud/client.go +++ b/internal/extsvc/bitbucketcloud/client.go @@ -111,7 +111,7 @@ func newClient(urn string, config *schema.BitbucketCloudConnection, httpClient h Password: config.AppPassword, }, // Default limits are defined in extsvc.GetLimitFromConfig - rateLimit: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("BitbucketCloudClient", ""), urn)), + rateLimit: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("BitbucketCloudClient"), urn)), }, nil } diff --git a/internal/extsvc/bitbucketserver/client.go b/internal/extsvc/bitbucketserver/client.go index 46202248765..c1416bd1e58 100644 --- a/internal/extsvc/bitbucketserver/client.go +++ b/internal/extsvc/bitbucketserver/client.go @@ -106,7 +106,7 @@ func newClient(urn string, config *schema.BitbucketServerConnection, httpClient httpClient: httpClient, URL: u, // Default limits are defined in extsvc.GetLimitFromConfig - rateLimit: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("BitbucketServerClient", ""), urn)), + rateLimit: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("BitbucketServerClient"), urn)), }, nil } diff --git a/internal/extsvc/crates/client.go b/internal/extsvc/crates/client.go index 981dd0bf441..6b044d427b6 100644 --- a/internal/extsvc/crates/client.go +++ b/internal/extsvc/crates/client.go @@ -26,7 +26,7 @@ func NewClient(urn string, 
httpfactory *httpcli.Factory) (*Client, error) { } return &Client{ uncachedClient: uncached, - limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("RustCratesClient", ""), urn)), + limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("RustCratesClient"), urn)), }, nil } diff --git a/internal/extsvc/gerrit/client.go b/internal/extsvc/gerrit/client.go index 569952c0287..c232e1cc469 100644 --- a/internal/extsvc/gerrit/client.go +++ b/internal/extsvc/gerrit/client.go @@ -70,7 +70,7 @@ func NewClient(urn string, url *url.URL, creds *AccountCredentials, httpClient h return &client{ httpClient: httpClient, URL: url, - rateLimit: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GerritClient", ""), urn)), + rateLimit: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GerritClient"), urn)), auther: auther, }, nil } diff --git a/internal/extsvc/github/globallock.go b/internal/extsvc/github/globallock.go index 09a49f4b8b5..127eb392a19 100644 --- a/internal/extsvc/github/globallock.go +++ b/internal/extsvc/github/globallock.go @@ -48,7 +48,7 @@ var metricLockRequestDurationGauge = promauto.NewHistogram(prometheus.HistogramO }) func restrictGitHubDotComConcurrency(logger log.Logger, doer httpcli.Doer, r *http.Request) (*http.Response, error) { - logger = logger.Scoped("githubcom-concurrency-limiter", "Limits concurrency to 1 per token against GitHub.com to prevent abuse detection") + logger = logger.Scoped("githubcom-concurrency-limiter") var token string if v := r.Header["Authorization"]; len(v) > 0 { fields := strings.Fields(v[0]) diff --git a/internal/extsvc/github/v3.go b/internal/extsvc/github/v3.go index 89f35df2c9e..817e9401170 100644 --- a/internal/extsvc/github/v3.go +++ b/internal/extsvc/github/v3.go @@ -112,11 +112,11 @@ func newV3Client(logger log.Logger, urn string, apiURL *url.URL, a auth.Authenti tokenHash = a.Hash() } - rl := ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitHubClient", ""), urn)) + rl := ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitHubClient"), urn)) rlm := ratelimit.DefaultMonitorRegistry.GetOrSet(apiURL.String(), tokenHash, resource, &ratelimit.Monitor{HeaderPrefix: "X-"}) return &V3Client{ - log: logger.Scoped("github.v3", "github v3 client"). + log: logger.Scoped("github.v3"). With( log.String("urn", urn), log.String("resource", resource), diff --git a/internal/extsvc/github/v4.go b/internal/extsvc/github/v4.go index 0b6385cd7e9..8ab7d092f43 100644 --- a/internal/extsvc/github/v4.go +++ b/internal/extsvc/github/v4.go @@ -93,11 +93,11 @@ func NewV4Client(urn string, apiURL *url.URL, a auth.Authenticator, cli httpcli. tokenHash = a.Hash() } - rl := ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitHubClient", ""), urn)) + rl := ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitHubClient"), urn)) rlm := ratelimit.DefaultMonitorRegistry.GetOrSet(apiURL.String(), tokenHash, "graphql", &ratelimit.Monitor{HeaderPrefix: "X-"}) return &V4Client{ - log: log.Scoped("github.v4", "github v4 client"), + log: log.Scoped("github.v4"), urn: urn, apiURL: apiURL, githubDotCom: URLIsGitHubDotCom(apiURL), @@ -387,7 +387,7 @@ func (c *V4Client) fetchGitHubVersion(ctx context.Context) (version *semver.Vers } // Initiate a v3Client since this requires a V3 API request. 
- logger := c.log.Scoped("fetchGitHubVersion", "temporary client for fetching github version") + logger := c.log.Scoped("fetchGitHubVersion") v3Client := NewV3Client(logger, c.urn, c.apiURL, c.auth, c.httpClient) v, err := v3Client.GetVersion(ctx) if err != nil { @@ -649,7 +649,7 @@ fragment RepositoryFields on Repository { } func (c *V4Client) GetRepo(ctx context.Context, owner, repo string) (*Repository, error) { - logger := c.log.Scoped("GetRepo", "temporary client for getting GitHub repository") + logger := c.log.Scoped("GetRepo") // We technically don't need to use the REST API for this but it's just a bit easier. return NewV3Client(logger, c.urn, c.apiURL, c.auth, c.httpClient).GetRepo(ctx, owner, repo) } @@ -660,7 +660,7 @@ func (c *V4Client) GetRepo(ctx context.Context, owner, repo string) (*Repository func (c *V4Client) Fork(ctx context.Context, owner, repo string, org *string, forkName string) (*Repository, error) { // Unfortunately, the GraphQL API doesn't provide a mutation to fork as of // December 2021, so we have to fall back to the REST API. - logger := c.log.Scoped("Fork", "temporary client for forking GitHub repository") + logger := c.log.Scoped("Fork") return NewV3Client(logger, c.urn, c.apiURL, c.auth, c.httpClient).Fork(ctx, owner, repo, org, forkName) } @@ -668,7 +668,7 @@ func (c *V4Client) Fork(ctx context.Context, owner, repo string, org *string, fo func (c *V4Client) DeleteBranch(ctx context.Context, owner, repo, branch string) error { // Unfortunately, the GraphQL API doesn't provide a mutation to delete a ref/branch as // of May 2023, so we have to fall back to the REST API. - logger := c.log.Scoped("DeleteBranch", "temporary client for deleting a branch") + logger := c.log.Scoped("DeleteBranch") return NewV3Client(logger, c.urn, c.apiURL, c.auth, c.httpClient).DeleteBranch(ctx, owner, repo, branch) } @@ -676,14 +676,14 @@ func (c *V4Client) DeleteBranch(ctx context.Context, owner, repo, branch string) // be supplied in a fully qualified format, such as `refs/heads/branch` or // `refs/tags/tag`. func (c *V4Client) GetRef(ctx context.Context, owner, repo, ref string) (*restCommitRef, error) { - logger := c.log.Scoped("GetRef", "temporary client for getting a ref on GitHub") + logger := c.log.Scoped("GetRef") // We technically don't need to use the REST API for this but it's just a bit easier. return NewV3Client(logger, c.urn, c.apiURL, c.auth, c.httpClient).GetRef(ctx, owner, repo, ref) } // CreateCommit creates a commit in the given repository based on a tree object. func (c *V4Client) CreateCommit(ctx context.Context, owner, repo, message, tree string, parents []string, author, committer *restAuthorCommiter) (*RestCommit, error) { - logger := c.log.Scoped("CreateCommit", "temporary client for creating a commit on GitHub") + logger := c.log.Scoped("CreateCommit") // As of May 2023, the GraphQL API does not expose any mutations for creating commits // other than one which requires sending the entire file contents for any files // changed by the commit, which is not feasible for creating large commits. Therefore, @@ -695,7 +695,7 @@ func (c *V4Client) CreateCommit(ctx context.Context, owner, repo, message, tree // UpdateRef updates the ref of a branch to point to the given commit. The ref should be // supplied in a fully qualified format, such as `refs/heads/branch` or `refs/tags/tag`. 
func (c *V4Client) UpdateRef(ctx context.Context, owner, repo, ref, commit string) (*restUpdatedRef, error) { - logger := c.log.Scoped("UpdateRef", "temporary client for updating a ref on GitHub") + logger := c.log.Scoped("UpdateRef") // We technically don't need to use the REST API for this but it's just a bit easier. return NewV3Client(logger, c.urn, c.apiURL, c.auth, c.httpClient).UpdateRef(ctx, owner, repo, ref, commit) } diff --git a/internal/extsvc/gitlab/client.go b/internal/extsvc/gitlab/client.go index f7080503f35..bb05d82af97 100644 --- a/internal/extsvc/gitlab/client.go +++ b/internal/extsvc/gitlab/client.go @@ -180,12 +180,12 @@ func (p *ClientProvider) NewClient(a auth.Authenticator) *Client { } projCache := rcache.NewWithTTL(key, int(cacheTTL/time.Second)) - rl := ratelimit.NewInstrumentedLimiter(p.urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitLabClient", ""), p.urn)) + rl := ratelimit.NewInstrumentedLimiter(p.urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitLabClient"), p.urn)) rlm := ratelimit.DefaultMonitorRegistry.GetOrSet(p.baseURL.String(), tokenHash, "rest", &ratelimit.Monitor{}) return &Client{ urn: p.urn, - log: log.Scoped("gitlabAPIClient", "client used to make API requests to Gitlab."), + log: log.Scoped("gitlabAPIClient"), baseURL: p.baseURL, httpClient: p.httpClient, projCache: projCache, @@ -268,7 +268,7 @@ func (c *Client) doWithBaseURL(ctx context.Context, req *http.Request, result an // to cache server-side req.Header.Set("Cache-Control", "max-age=0") - resp, err = oauthutil.DoRequest(ctx, log.Scoped("gitlab client", "do request"), c.httpClient, req, c.Auth, func(r *http.Request) (*http.Response, error) { + resp, err = oauthutil.DoRequest(ctx, log.Scoped("gitlab client"), c.httpClient, req, c.Auth, func(r *http.Request) (*http.Response, error) { return c.httpClient.Do(r) }) if resp != nil { @@ -302,7 +302,7 @@ func (c *Client) WithAuthenticator(a auth.Authenticator) *Client { tokenHash := a.Hash() cc := *c - cc.internalRateLimiter = ratelimit.NewInstrumentedLimiter(c.urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitLabClient", ""), c.urn)) + cc.internalRateLimiter = ratelimit.NewInstrumentedLimiter(c.urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitLabClient"), c.urn)) cc.externalRateLimiter = ratelimit.DefaultMonitorRegistry.GetOrSet(cc.baseURL.String(), tokenHash, "rest", &ratelimit.Monitor{}) cc.Auth = a diff --git a/internal/extsvc/gomodproxy/client.go b/internal/extsvc/gomodproxy/client.go index 71245abeb25..17d33008d4c 100644 --- a/internal/extsvc/gomodproxy/client.go +++ b/internal/extsvc/gomodproxy/client.go @@ -38,7 +38,7 @@ func NewClient(urn string, urls []string, httpfactory *httpcli.Factory) *Client urls: urls, cachedClient: cached, uncachedClient: uncached, - limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GoModClient", ""), urn)), + limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GoModClient"), urn)), } } diff --git a/internal/extsvc/npm/npm.go b/internal/extsvc/npm/npm.go index 210ba469b57..0ba3c390298 100644 --- a/internal/extsvc/npm/npm.go +++ b/internal/extsvc/npm/npm.go @@ -72,7 +72,7 @@ func NewHTTPClient(urn string, registryURL string, credentials string, httpfacto registryURL: registryURL, uncachedClient: uncached, cachedClient: cached, - limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("NPMClient", ""), urn)), + limiter: ratelimit.NewInstrumentedLimiter(urn, 
ratelimit.NewGlobalRateLimiter(log.Scoped("NPMClient"), urn)), credentials: credentials, }, nil } diff --git a/internal/extsvc/npm/observability.go b/internal/extsvc/npm/observability.go index 284c06d5c63..e62b9d6f877 100644 --- a/internal/extsvc/npm/observability.go +++ b/internal/extsvc/npm/observability.go @@ -53,7 +53,7 @@ var ( func getOperations() *operations { opsOnce.Do(func() { - observationCtx := observation.NewContext(log.Scoped("npm", "")) + observationCtx := observation.NewContext(log.Scoped("npm")) ops = newOperations(observationCtx) }) diff --git a/internal/extsvc/pagure/client.go b/internal/extsvc/pagure/client.go index b129e33d194..fe4fa2fcbfe 100644 --- a/internal/extsvc/pagure/client.go +++ b/internal/extsvc/pagure/client.go @@ -52,7 +52,7 @@ func NewClient(urn string, config *schema.PagureConnection, httpClient httpcli.D Config: config, URL: u, httpClient: httpClient, - rateLimit: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("PagureClient", ""), urn)), + rateLimit: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("PagureClient"), urn)), }, nil } diff --git a/internal/extsvc/pypi/client.go b/internal/extsvc/pypi/client.go index b1dd97aa36b..f8472d0a3a8 100644 --- a/internal/extsvc/pypi/client.go +++ b/internal/extsvc/pypi/client.go @@ -72,7 +72,7 @@ func NewClient(urn string, urls []string, httpfactory *httpcli.Factory) (*Client urls: urls, uncachedClient: uncached, cachedClient: cached, - limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("PyPiClient", ""), urn)), + limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("PyPiClient"), urn)), }, nil } diff --git a/internal/extsvc/rubygems/client.go b/internal/extsvc/rubygems/client.go index 87e18fb1960..88d8ecd1bd5 100644 --- a/internal/extsvc/rubygems/client.go +++ b/internal/extsvc/rubygems/client.go @@ -32,7 +32,7 @@ func NewClient(urn string, registryURL string, httpfactory *httpcli.Factory) (*C return &Client{ registryURL: registryURL, uncachedClient: uncached, - limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("RubyGemsClient", ""), urn)), + limiter: ratelimit.NewInstrumentedLimiter(urn, ratelimit.NewGlobalRateLimiter(log.Scoped("RubyGemsClient"), urn)), }, nil } diff --git a/internal/extsvc/versions/sync.go b/internal/extsvc/versions/sync.go index a5beb2d23dc..680f0205d71 100644 --- a/internal/extsvc/versions/sync.go +++ b/internal/extsvc/versions/sync.go @@ -46,7 +46,7 @@ func (j *syncingJob) Routines(_ context.Context, observationCtx *observation.Con return nil, err } - sourcerLogger := observationCtx.Logger.Scoped("repos.Sourcer", "repository source for syncing") + sourcerLogger := observationCtx.Logger.Scoped("repos.Sourcer") sourcerCF := httpcli.NewExternalClientFactory( httpcli.NewLoggingMiddleware(sourcerLogger), ) diff --git a/internal/gitserver/addrs.go b/internal/gitserver/addrs.go index 52fd2e32385..9c2fcf024f8 100644 --- a/internal/gitserver/addrs.go +++ b/internal/gitserver/addrs.go @@ -294,14 +294,14 @@ func (a *atomicGitServerConns) update(cfg *conf.Unified) { a.conns.Store(&after) return } - log.Scoped("", "gitserver gRPC connections").Info( + log.Scoped("").Info( "new gitserver addresses", log.Strings("before", before.Addresses), log.Strings("after", after.Addresses), ) // Open connections for each address - clientLogger := log.Scoped("gitserver.client", "gitserver gRPC client") + clientLogger := 
log.Scoped("gitserver.client") after.grpcConns = make(map[string]connAndErr, len(after.Addresses)) for _, addr := range after.Addresses { diff --git a/internal/gitserver/client.go b/internal/gitserver/client.go index 35b3d70c20f..a822af64564 100644 --- a/internal/gitserver/client.go +++ b/internal/gitserver/client.go @@ -89,7 +89,7 @@ type ClientSource interface { // NewClient returns a new gitserver.Client. func NewClient() Client { - logger := sglog.Scoped("GitserverClient", "Client to talk from other services to Gitserver") + logger := sglog.Scoped("GitserverClient") return &clientImplementor{ logger: logger, httpClient: defaultDoer, diff --git a/internal/gitserver/git_command.go b/internal/gitserver/git_command.go index 525657ec830..7cf1ea5cf56 100644 --- a/internal/gitserver/git_command.go +++ b/internal/gitserver/git_command.go @@ -86,7 +86,7 @@ func NewLocalGitCommand(repo api.RepoName, arg ...string) *LocalGitCommand { return &LocalGitCommand{ repo: repo, args: args, - Logger: log.Scoped("local", "local git command logger"), + Logger: log.Scoped("local"), } } diff --git a/internal/gitserver/observability.go b/internal/gitserver/observability.go index c6acd9a3c43..1848c9c90b0 100644 --- a/internal/gitserver/observability.go +++ b/internal/gitserver/observability.go @@ -123,7 +123,7 @@ var ( func getOperations() *operations { operationsInstOnce.Do(func() { - observationCtx := observation.NewContext(log.Scoped("gitserver.client", "gitserver client")) + observationCtx := observation.NewContext(log.Scoped("gitserver.client")) operationsInst = newOperations(observationCtx) }) diff --git a/internal/gitserver/search/search.go b/internal/gitserver/search/search.go index 3dd3f8edb8e..d13626fb629 100644 --- a/internal/gitserver/search/search.go +++ b/internal/gitserver/search/search.go @@ -452,7 +452,7 @@ func utf8String(b []byte) string { } func filterRawDiff(rawDiff []*godiff.FileDiff, filterFunc func(string) (bool, error)) []*godiff.FileDiff { - logger := log.Scoped("filterRawDiff", "sub-repo filtering for raw diffs") + logger := log.Scoped("filterRawDiff") if filterFunc == nil { return rawDiff } diff --git a/internal/goroutine/periodic.go b/internal/goroutine/periodic.go index 6ef6c85a8b2..a70566a8cec 100644 --- a/internal/goroutine/periodic.go +++ b/internal/goroutine/periodic.go @@ -128,7 +128,7 @@ func NewPeriodicGoroutine(ctx context.Context, handler Handler, options ...Optio // enabled, caller should use goroutine.WithOperation if r.operation == nil { r.operation = observation.NewContext( - log.Scoped("periodic", "periodic goroutine handler"), + log.Scoped("periodic"), observation.Tracer(oteltrace.NewNoopTracerProvider().Tracer("noop")), observation.Metrics(metrics.NoOpRegisterer), ).Operation(observation.Op{ diff --git a/internal/grpc/internalerrs/logging.go b/internal/grpc/internalerrs/logging.go index 1b8100ab89a..a76bd445b67 100644 --- a/internal/grpc/internalerrs/logging.go +++ b/internal/grpc/internalerrs/logging.go @@ -44,8 +44,8 @@ func LoggingUnaryClientInterceptor(l log.Logger) grpc.UnaryClientInterceptor { } } - logger := l.Scoped(logScope, logDescription) - logger = logger.Scoped("unaryMethod", "errors that originated from a unary method") + logger := l.Scoped(logScope) + logger = logger.Scoped("unaryMethod") return func(ctx context.Context, fullMethod string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { err := invoker(ctx, fullMethod, req, reply, cc, opts...) 
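The interceptor hunks in internal/grpc/internalerrs/logging.go (starting above and continuing below) chain two Scoped calls: a package-level scope taken from logScope, which is defined outside the hunks shown here, and a per-call-kind child scope such as unaryMethod or streamingMethod, both now name-only. A sketch of that nesting (illustrative only; the outer scope name is a stand-in):

    // Illustrative sketch only; not part of the diff. "grpc.internalerrs" is a
    // stand-in for the package's logScope value, which is defined outside the
    // hunks shown here.
    package example

    import "github.com/sourcegraph/log"

    // unaryErrorLogger shows the nested, name-only scoping used by the unary
    // interceptors above; the streaming interceptors below use the child scope
    // "streamingMethod" instead.
    func unaryErrorLogger(base log.Logger) log.Logger {
        logger := base.Scoped("grpc.internalerrs") // stand-in for logScope
        return logger.Scoped("unaryMethod")        // child scope, no description
    }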
@@ -74,8 +74,8 @@ func LoggingStreamClientInterceptor(l log.Logger) grpc.StreamClientInterceptor { } } - logger := l.Scoped(logScope, logDescription) - logger = logger.Scoped("streamingMethod", "errors that originated from a streaming method") + logger := l.Scoped(logScope) + logger = logger.Scoped("streamingMethod") return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, fullMethod string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { serviceName, methodName := grpcutil.SplitMethodName(fullMethod) @@ -86,7 +86,7 @@ func LoggingStreamClientInterceptor(l log.Logger) grpc.StreamClientInterceptor { // until after the stream is created. // // This is fine since the error is already available, and the non-utf8 string check is robust against nil messages. - logger := logger.Scoped("postInit", "errors that occurred after stream initialization, but before the first message was sent") + logger := logger.Scoped("postInit") doLog(logger, serviceName, methodName, nil, nil, err) return nil, err } @@ -106,8 +106,8 @@ func LoggingUnaryServerInterceptor(l log.Logger) grpc.UnaryServerInterceptor { } } - logger := l.Scoped(logScope, logDescription) - logger = logger.Scoped("unaryMethod", "errors that originated from a unary method") + logger := l.Scoped(logScope) + logger = logger.Scoped("unaryMethod") return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { response, err := handler(ctx, req) @@ -136,8 +136,8 @@ func LoggingStreamServerInterceptor(l log.Logger) grpc.StreamServerInterceptor { } } - logger := l.Scoped(logScope, logDescription) - logger = logger.Scoped("streamingMethod", "errors that originated from a streaming method") + logger := l.Scoped(logScope) + logger = logger.Scoped("streamingMethod") return func(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { serviceName, methodName := grpcutil.SplitMethodName(info.FullMethod) @@ -148,8 +148,8 @@ func LoggingStreamServerInterceptor(l log.Logger) grpc.StreamServerInterceptor { } func newLoggingServerStream(s grpc.ServerStream, logger log.Logger, serviceName, methodName string) grpc.ServerStream { - sendLogger := logger.Scoped("postMessageSend", "errors that occurred after sending a message") - receiveLogger := logger.Scoped("postMessageReceive", "errors that occurred after receiving a message") + sendLogger := logger.Scoped("postMessageSend") + receiveLogger := logger.Scoped("postMessageReceive") requestSaver := requestSavingServerStream{ServerStream: s} @@ -171,8 +171,8 @@ func newLoggingServerStream(s grpc.ServerStream, logger log.Logger, serviceName, } func newLoggingClientStream(s grpc.ClientStream, logger log.Logger, serviceName, methodName string) grpc.ClientStream { - sendLogger := logger.Scoped("postMessageSend", "errors that occurred after sending a message") - receiveLogger := logger.Scoped("postMessageReceive", "errors that occurred after receiving a message") + sendLogger := logger.Scoped("postMessageSend") + receiveLogger := logger.Scoped("postMessageReceive") requestSaver := requestSavingClientStream{ClientStream: s} diff --git a/internal/highlight/highlight.go b/internal/highlight/highlight.go index 322cfdf454a..adcb8041c7e 100644 --- a/internal/highlight/highlight.go +++ b/internal/highlight/highlight.go @@ -318,7 +318,7 @@ func Code(ctx context.Context, p Params) (response *HighlightedCode, aborted boo return Mocks.Code(p) } - logger := log.Scoped("highlight", "Code") + 
logger := log.Scoped("highlight") p.Filepath = normalizeFilepath(p.Filepath) diff --git a/internal/httpcli/client.go b/internal/httpcli/client.go index e7887c6d718..5b49d0b684b 100644 --- a/internal/httpcli/client.go +++ b/internal/httpcli/client.go @@ -320,7 +320,7 @@ const ( // // It also logs metadata set by request context by other middleware, such as NewRetryPolicy. func NewLoggingMiddleware(logger log.Logger) Middleware { - logger = logger.Scoped("httpcli", "http client") + logger = logger.Scoped("httpcli") return func(d Doer) Doer { return DoerFunc(func(r *http.Request) (*http.Response, error) { diff --git a/internal/httpcli/redis_logger_middleware.go b/internal/httpcli/redis_logger_middleware.go index 976eb20d707..90ea6522a5e 100644 --- a/internal/httpcli/redis_logger_middleware.go +++ b/internal/httpcli/redis_logger_middleware.go @@ -114,7 +114,7 @@ func redisLoggerMiddleware() Middleware { // Save new item if err := outboundRequestsRedisFIFOList.Insert(logItemJson); err != nil { // Log would get upset if we created a logger at init time → create logger on the fly - log.Scoped("redisLoggerMiddleware", "").Error("insert log item", log.Error(err)) + log.Scoped("redisLoggerMiddleware").Error("insert log item", log.Error(err)) } }() diff --git a/internal/insights/background/background.go b/internal/insights/background/background.go index a9b2996072d..3fab0ab842d 100644 --- a/internal/insights/background/background.go +++ b/internal/insights/background/background.go @@ -52,14 +52,14 @@ func GetBackgroundJobs(ctx context.Context, logger log.Logger, mainAppDB databas workerInsightsBaseStore := basestore.NewWithHandle(insightsDB.Handle()) // Create basic metrics for recording information about background jobs. - observationCtx := observation.NewContext(logger.Scoped("background", "insights background jobs")) + observationCtx := observation.NewContext(logger.Scoped("background")) insightsMetadataStore := store.NewInsightStore(insightsDB) // Start background goroutines for all of our workers. // The query runner worker is started in a separate routine so it can benefit from horizontal scaling. routines := []goroutine.BackgroundRoutine{ // Discovers and enqueues insights work. - newInsightEnqueuer(ctx, observationCtx, workerBaseStore, insightsMetadataStore, logger.Scoped("background-insight-enqueuer", "")), + newInsightEnqueuer(ctx, observationCtx, workerBaseStore, insightsMetadataStore, logger.Scoped("background-insight-enqueuer")), // Enqueues series to be picked up by the retention worker. newRetentionEnqueuer(ctx, workerInsightsBaseStore, insightsMetadataStore), // Emits backend pings based on insights data. @@ -106,7 +106,7 @@ func GetBackgroundJobs(ctx context.Context, logger log.Logger, mainAppDB databas Help: "Counter of the number of repositories analyzed in the backfiller new state.", }), CostAnalyzer: priority.DefaultQueryAnalyzer(), - RepoQueryExecutor: query.NewStreamingRepoQueryExecutor(logger.Scoped("StreamingRepoExecutor", "execute repo search in background workers")), + RepoQueryExecutor: query.NewStreamingRepoQueryExecutor(logger.Scoped("StreamingRepoExecutor")), } // Add the backfill v2 workers @@ -129,7 +129,7 @@ func GetBackgroundQueryRunnerJob(ctx context.Context, logger log.Logger, mainApp repoStore := mainAppDB.Repos() // Create basic metrics for recording information about background jobs. 
- observationCtx := observation.NewContext(logger.Scoped("background", "background query runner job")) + observationCtx := observation.NewContext(logger.Scoped("background")) queryRunnerWorkerMetrics, queryRunnerResetterMetrics := newWorkerMetrics(observationCtx, "query_runner_worker") workerStore := queryrunner.CreateDBWorkerStore(observationCtx, workerBaseStore) @@ -138,8 +138,8 @@ func GetBackgroundQueryRunnerJob(ctx context.Context, logger log.Logger, mainApp return []goroutine.BackgroundRoutine{ // Register the query-runner worker and resetter, which executes search queries and records // results to the insights DB. - queryrunner.NewWorker(ctx, logger.Scoped("queryrunner.Worker", ""), workerStore, insightsStore, repoStore, queryRunnerWorkerMetrics, seachQueryLimiter), - queryrunner.NewResetter(ctx, logger.Scoped("queryrunner.Resetter", ""), workerStore, queryRunnerResetterMetrics), + queryrunner.NewWorker(ctx, logger.Scoped("queryrunner.Worker"), workerStore, insightsStore, repoStore, queryRunnerWorkerMetrics, seachQueryLimiter), + queryrunner.NewResetter(ctx, logger.Scoped("queryrunner.Resetter"), workerStore, queryRunnerResetterMetrics), queryrunner.NewCleaner(ctx, observationCtx, workerBaseStore), } } @@ -153,8 +153,8 @@ func GetBackgroundDataRetentionJob(ctx context.Context, observationCtx *observat dbWorkerStore := retention.CreateDBWorkerStore(observationCtx, workerBaseStore) return []goroutine.BackgroundRoutine{ - retention.NewWorker(ctx, observationCtx.Logger.Scoped("Worker", ""), dbWorkerStore, insightsStore, workerMetrics), - retention.NewResetter(ctx, observationCtx.Logger.Scoped("Resetter", ""), dbWorkerStore, resetterMetrics), + retention.NewWorker(ctx, observationCtx.Logger.Scoped("Worker"), dbWorkerStore, insightsStore, workerMetrics), + retention.NewResetter(ctx, observationCtx.Logger.Scoped("Resetter"), dbWorkerStore, resetterMetrics), retention.NewCleaner(ctx, observationCtx, workerBaseStore), } } diff --git a/internal/insights/background/data_prune.go b/internal/insights/background/data_prune.go index afed8e8bb16..70bbfe13bd1 100644 --- a/internal/insights/background/data_prune.go +++ b/internal/insights/background/data_prune.go @@ -18,7 +18,7 @@ import ( // NewInsightsDataPrunerJob will periodically delete recorded data series that have been marked `deleted`. func NewInsightsDataPrunerJob(ctx context.Context, postgres database.DB, insightsdb edb.InsightsDB) goroutine.BackgroundRoutine { interval := time.Minute * 60 - logger := log.Scoped("InsightsDataPrunerJob", "") + logger := log.Scoped("InsightsDataPrunerJob") return goroutine.NewPeriodicGoroutine( ctx, diff --git a/internal/insights/background/license_check.go b/internal/insights/background/license_check.go index f542d7f3f45..8625d8910a5 100644 --- a/internal/insights/background/license_check.go +++ b/internal/insights/background/license_check.go @@ -17,7 +17,7 @@ import ( // NewLicenseCheckJob will periodically check for the existence of a Code Insights license and ensure the correct set of insights is frozen. 
func NewLicenseCheckJob(ctx context.Context, postgres database.DB, insightsdb edb.InsightsDB) goroutine.BackgroundRoutine { interval := time.Minute * 15 - logger := log.Scoped("CodeInsightsLicenseCheckJob", "") + logger := log.Scoped("CodeInsightsLicenseCheckJob") return goroutine.NewPeriodicGoroutine( ctx, diff --git a/internal/insights/background/limiter/historical.go b/internal/insights/background/limiter/historical.go index f298666b00d..68a0d7fe191 100644 --- a/internal/insights/background/limiter/historical.go +++ b/internal/insights/background/limiter/historical.go @@ -17,7 +17,7 @@ var historicalLimiter *ratelimit.InstrumentedLimiter func HistoricalWorkRate() *ratelimit.InstrumentedLimiter { historicalOnce.Do(func() { - historicalLogger = log.Scoped("insights.historical.ratelimiter", "") + historicalLogger = log.Scoped("insights.historical.ratelimiter") defaultRateLimit := rate.Limit(20.0) defaultBurst := 20 getRateLimit := getHistoricalWorkerRateLimit(defaultRateLimit, defaultBurst) diff --git a/internal/insights/background/limiter/search_query.go b/internal/insights/background/limiter/search_query.go index 9d80383aa6a..40967ebcc09 100644 --- a/internal/insights/background/limiter/search_query.go +++ b/internal/insights/background/limiter/search_query.go @@ -18,7 +18,7 @@ var searchLimiter *ratelimit.InstrumentedLimiter func SearchQueryRate() *ratelimit.InstrumentedLimiter { searchOnce.Do(func() { - searchLogger = log.Scoped("insights.search.ratelimiter", "") + searchLogger = log.Scoped("insights.search.ratelimiter") defaultRateLimit := rate.Limit(20.0) defaultBurst := 20 getRateLimit := getSearchQueryRateLimit(defaultRateLimit, defaultBurst) diff --git a/internal/insights/background/pings/insights_ping_emitter.go b/internal/insights/background/pings/insights_ping_emitter.go index 914897fe30f..b6766c635fe 100644 --- a/internal/insights/background/pings/insights_ping_emitter.go +++ b/internal/insights/background/pings/insights_ping_emitter.go @@ -20,7 +20,7 @@ import ( func NewInsightsPingEmitterJob(ctx context.Context, base database.DB, insights edb.InsightsDB) goroutine.BackgroundRoutine { interval := time.Minute * 60 e := InsightsPingEmitter{ - logger: log.Scoped("InsightsPingEmitter", ""), + logger: log.Scoped("InsightsPingEmitter"), postgresDb: base, insightsDb: insights, } diff --git a/internal/insights/background/queryrunner/search.go b/internal/insights/background/queryrunner/search.go index e196ef2a5e0..8f47c9144ee 100644 --- a/internal/insights/background/queryrunner/search.go +++ b/internal/insights/background/queryrunner/search.go @@ -174,7 +174,7 @@ func generateSearchRecordingsStream(ctx context.Context, job *SearchJob, recordT func makeSearchHandler(provider streamSearchProvider) InsightsHandler { return func(ctx context.Context, job *SearchJob, series *types.InsightSeries, recordTime time.Time) ([]store.RecordSeriesPointArgs, error) { - recordings, err := generateSearchRecordingsStream(ctx, job, recordTime, provider, log.Scoped("SearchRecordingsGenerator", "")) + recordings, err := generateSearchRecordingsStream(ctx, job, recordTime, provider, log.Scoped("SearchRecordingsGenerator")) if err != nil { return nil, errors.Wrapf(err, "searchHandler") } @@ -187,7 +187,7 @@ func makeComputeHandler(provider streamComputeProvider) InsightsHandler { computeDelegate := func(ctx context.Context, job *SearchJob, recordTime time.Time, logger log.Logger) (_ []store.RecordSeriesPointArgs, err error) { return generateComputeRecordingsStream(ctx, job, recordTime, provider, logger) } - 
recordings, err := computeDelegate(ctx, job, recordTime, log.Scoped("ComputeRecordingsGenerator", "")) + recordings, err := computeDelegate(ctx, job, recordTime, log.Scoped("ComputeRecordingsGenerator")) if err != nil { return nil, errors.Wrapf(err, "computeHandler") } @@ -197,7 +197,7 @@ func makeComputeHandler(provider streamComputeProvider) InsightsHandler { func makeMappingComputeHandler(provider streamComputeProvider) InsightsHandler { return func(ctx context.Context, job *SearchJob, series *types.InsightSeries, recordTime time.Time) ([]store.RecordSeriesPointArgs, error) { - recordings, err := generateComputeRecordingsStream(ctx, job, recordTime, provider, log.Scoped("ComputeMappingRecordingsGenerator", "")) + recordings, err := generateComputeRecordingsStream(ctx, job, recordTime, provider, log.Scoped("ComputeMappingRecordingsGenerator")) if err != nil { return nil, errors.Wrapf(err, "mappingComputeHandler") } diff --git a/internal/insights/background/queryrunner/worker.go b/internal/insights/background/queryrunner/worker.go index 384a1632389..1cb9ce59fb7 100644 --- a/internal/insights/background/queryrunner/worker.go +++ b/internal/insights/background/queryrunner/worker.go @@ -77,7 +77,7 @@ func NewWorker(ctx context.Context, logger log.Logger, workerStore *workerStoreE metadadataStore: store.NewInsightStoreWith(insightsStore), seriesCache: sharedCache, searchHandlers: GetSearchHandlers(), - logger: log.Scoped("insights.queryRunner.Handler", ""), + logger: log.Scoped("insights.queryRunner.Handler"), }, options) } diff --git a/internal/insights/pipeline/backfill.go b/internal/insights/pipeline/backfill.go index 6a97eeae1fa..273050f2692 100644 --- a/internal/insights/pipeline/backfill.go +++ b/internal/insights/pipeline/backfill.go @@ -67,7 +67,7 @@ type BackfillerConfig struct { } func NewDefaultBackfiller(config BackfillerConfig) Backfiller { - logger := log.Scoped("insightsBackfiller", "") + logger := log.Scoped("insightsBackfiller") searchJobGenerator := makeSearchJobsFunc(logger, config.CommitClient, config.CompressionPlan, config.SearchPlanWorkerLimit, config.HistoricRateLimiter) searchRunner := makeRunSearchFunc(config.SearchHandlers, config.SearchRunnerWorkerLimit, config.SearchRateLimiter) persister := makeSaveResultsFunc(logger, config.InsightStore) diff --git a/internal/insights/query/capture_group_executor.go b/internal/insights/query/capture_group_executor.go index 5162ee8bbb7..a8e538f9498 100644 --- a/internal/insights/query/capture_group_executor.go +++ b/internal/insights/query/capture_group_executor.go @@ -37,7 +37,7 @@ func NewCaptureGroupExecutor(db database.DB, clock func() time.Time) *CaptureGro clock: clock, }, computeSearch: streamCompute, - logger: log.Scoped("CaptureGroupExecutor", ""), + logger: log.Scoped("CaptureGroupExecutor"), } } diff --git a/internal/insights/query/compute_executor.go b/internal/insights/query/compute_executor.go index 0894d700944..96cc23fe464 100644 --- a/internal/insights/query/compute_executor.go +++ b/internal/insights/query/compute_executor.go @@ -26,7 +26,7 @@ type ComputeExecutor struct { func NewComputeExecutor(postgres database.DB, clock func() time.Time) *ComputeExecutor { executor := ComputeExecutor{ - logger: log.Scoped("ComputeExecutor", "a logger scoped to query.ComputeExecutor"), + logger: log.Scoped("ComputeExecutor"), previewExecutor: previewExecutor{ repoStore: postgres.Repos(), filter: &compression.NoopFilter{}, diff --git a/internal/insights/query/streaming/search_client.go 
b/internal/insights/query/streaming/search_client.go index 74bebffd1cb..629c4884e9e 100644 --- a/internal/insights/query/streaming/search_client.go +++ b/internal/insights/query/streaming/search_client.go @@ -16,7 +16,7 @@ type SearchClient interface { } func NewInsightsSearchClient(db database.DB) SearchClient { - logger := log.Scoped("insightsSearchClient", "") + logger := log.Scoped("insightsSearchClient") return &insightsSearchClient{ db: db, searchClient: client.New(logger, db), diff --git a/internal/insights/query/streaming_query_executor.go b/internal/insights/query/streaming_query_executor.go index f4af45ef111..6a7ac709dfd 100644 --- a/internal/insights/query/streaming_query_executor.go +++ b/internal/insights/query/streaming_query_executor.go @@ -35,7 +35,7 @@ func NewStreamingExecutor(db database.DB, clock func() time.Time) *StreamingQuer filter: &compression.NoopFilter{}, clock: clock, }, - logger: log.Scoped("StreamingQueryExecutor", ""), + logger: log.Scoped("StreamingQueryExecutor"), } } diff --git a/internal/insights/scheduler/backfill_state_inprogress_handler.go b/internal/insights/scheduler/backfill_state_inprogress_handler.go index ee40f432b15..1e5e076a810 100644 --- a/internal/insights/scheduler/backfill_state_inprogress_handler.go +++ b/internal/insights/scheduler/backfill_state_inprogress_handler.go @@ -78,13 +78,13 @@ func makeInProgressWorker(ctx context.Context, config JobMonitorConfig) (*worker Metrics: workerutil.NewMetrics(config.ObservationCtx, name), }) - resetter := dbworker.NewResetter(log.Scoped("", ""), workerStore, dbworker.ResetterOptions{ + resetter := dbworker.NewResetter(log.Scoped("resetter"), workerStore, dbworker.ResetterOptions{ Name: fmt.Sprintf("%s_resetter", name), Interval: time.Second * 20, Metrics: dbworker.NewResetterMetrics(config.ObservationCtx, name), }) - configLogger := log.Scoped("insightsInProgressConfigWatcher", "") + configLogger := log.Scoped("insightsInProgressConfigWatcher") mu := sync.Mutex{} conf.Watch(func() { mu.Lock() diff --git a/internal/insights/scheduler/backfill_state_new_handler.go b/internal/insights/scheduler/backfill_state_new_handler.go index 6ef758cbf5a..ed0ec175e81 100644 --- a/internal/insights/scheduler/backfill_state_new_handler.go +++ b/internal/insights/scheduler/backfill_state_new_handler.go @@ -75,7 +75,7 @@ func makeNewBackfillWorker(ctx context.Context, config JobMonitorConfig) (*worke Metrics: workerutil.NewMetrics(config.ObservationCtx, name), }) - resetter := dbworker.NewResetter(log.Scoped("BackfillNewResetter", ""), workerStore, dbworker.ResetterOptions{ + resetter := dbworker.NewResetter(log.Scoped("BackfillNewResetter"), workerStore, dbworker.ResetterOptions{ Name: fmt.Sprintf("%s_resetter", name), Interval: time.Second * 20, Metrics: dbworker.NewResetterMetrics(config.ObservationCtx, name), diff --git a/internal/insights/store/permissions.go b/internal/insights/store/permissions.go index 6a86c59e061..71fada11b2d 100644 --- a/internal/insights/store/permissions.go +++ b/internal/insights/store/permissions.go @@ -22,7 +22,7 @@ type InsightPermStore struct { func NewInsightPermissionStore(db database.DB) *InsightPermStore { return &InsightPermStore{ - logger: log.Scoped("InsightPermStore", ""), + logger: log.Scoped("InsightPermStore"), Store: basestore.NewWithHandle(db.Handle()), } } diff --git a/internal/luasandbox/init.go b/internal/luasandbox/init.go index 935eadbebca..05e1f6879ff 100644 --- a/internal/luasandbox/init.go +++ b/internal/luasandbox/init.go @@ -7,5 +7,5 @@ import ( ) func 
NewService() *Service { - return newService(observation.NewContext(log.Scoped("luasandbox", ""))) + return newService(observation.NewContext(log.Scoped("luasandbox"))) } diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go index ed4bb1d679d..20bd8049451 100644 --- a/internal/metrics/metrics.go +++ b/internal/metrics/metrics.go @@ -196,7 +196,7 @@ func newDiskCollector(path string) prometheus.Collector { nil, constLabels, ), - logger: log.Scoped("diskCollector", ""), + logger: log.Scoped("diskCollector"), } } diff --git a/internal/observation/context.go b/internal/observation/context.go index 4a48d6150f2..e9fc739c02f 100644 --- a/internal/observation/context.go +++ b/internal/observation/context.go @@ -72,7 +72,6 @@ func ContextWithLogger(logger log.Logger, parent *Context) *Context { func ScopedContext(team, domain, component string, parent *Context) *Context { return ContextWithLogger(log.Scoped( fmt.Sprintf("%s.%s.%s", team, domain, component), - fmt.Sprintf("%s %s %s", team, domain, component), ), parent) } @@ -82,10 +81,10 @@ func (c *Context) Operation(args Op) *Operation { var logger log.Logger if c.Logger != nil { // Create a child logger, if a parent is provided. - logger = c.Logger.Scoped(args.Name, args.Description) + logger = c.Logger.Scoped(args.Name) } else { // Create a new logger. - logger = log.Scoped(args.Name, args.Description) + logger = log.Scoped(args.Name) } return &Operation{ context: c, diff --git a/internal/oobmigration/migrations/batches/extsvc_webhook_migrator.go b/internal/oobmigration/migrations/batches/extsvc_webhook_migrator.go index ed19c93b52a..65ef3085b75 100644 --- a/internal/oobmigration/migrations/batches/extsvc_webhook_migrator.go +++ b/internal/oobmigration/migrations/batches/extsvc_webhook_migrator.go @@ -26,7 +26,7 @@ var _ oobmigration.Migrator = &externalServiceWebhookMigrator{} func NewExternalServiceWebhookMigratorWithDB(store *basestore.Store, key encryption.Key, batchSize int) *externalServiceWebhookMigrator { return &externalServiceWebhookMigrator{ - logger: log.Scoped("ExternalServiceWebhookMigrator", ""), + logger: log.Scoped("ExternalServiceWebhookMigrator"), store: store, batchSize: batchSize, key: key, diff --git a/internal/oobmigration/migrations/batches/ssh_migrator.go b/internal/oobmigration/migrations/batches/ssh_migrator.go index 13c375d1491..43acbe0933c 100644 --- a/internal/oobmigration/migrations/batches/ssh_migrator.go +++ b/internal/oobmigration/migrations/batches/ssh_migrator.go @@ -25,7 +25,7 @@ var _ oobmigration.Migrator = &SSHMigrator{} func NewSSHMigratorWithDB(store *basestore.Store, key encryption.Key, batchSize int) *SSHMigrator { return &SSHMigrator{ - logger: log.Scoped("SSHMigrator", ""), + logger: log.Scoped("SSHMigrator"), store: store, key: key, batchSize: batchSize, diff --git a/internal/oobmigration/migrations/insights/migrator.go b/internal/oobmigration/migrations/insights/migrator.go index 31eb0464a2c..ad94b2774ab 100644 --- a/internal/oobmigration/migrations/insights/migrator.go +++ b/internal/oobmigration/migrations/insights/migrator.go @@ -25,7 +25,7 @@ func NewMigrator(frontendDB, insightsDB *basestore.Store) *insightsMigrator { return &insightsMigrator{ frontendStore: frontendDB, insightsStore: insightsDB, - logger: log.Scoped("insights-migrator", ""), + logger: log.Scoped("insights-migrator"), } } diff --git a/internal/oobmigration/runner.go b/internal/oobmigration/runner.go index dde3d6a3206..096449247ce 100644 --- a/internal/oobmigration/runner.go +++ 
b/internal/oobmigration/runner.go @@ -53,7 +53,7 @@ func newRunner(observationCtx *observation.Context, store storeIface, refreshTic return &Runner{ store: store, - logger: observationCtx.Logger.Scoped("oobmigration", ""), + logger: observationCtx.Logger.Scoped("oobmigration"), refreshTicker: refreshTicker, operations: newOperations(observationCtx), migrators: map[int]migratorAndOption{}, diff --git a/internal/own/background/background.go b/internal/own/background/background.go index 0cf9509d20a..9de02117bf4 100644 --- a/internal/own/background/background.go +++ b/internal/own/background/background.go @@ -165,7 +165,7 @@ func makeWorker(ctx context.Context, db database.DB, observationCtx *observation Metrics: workerutil.NewMetrics(observationCtx, "own_background_worker_processor"), }) - resetter := dbworker.NewResetter(log.Scoped("OwnBackgroundResetter", ""), workerStore, dbworker.ResetterOptions{ + resetter := dbworker.NewResetter(log.Scoped("OwnBackgroundResetter"), workerStore, dbworker.ResetterOptions{ Name: "own_background_worker_resetter", Interval: time.Second * 20, Metrics: dbworker.NewResetterMetrics(observationCtx, "own_background_worker"), diff --git a/internal/pubsub/topic.go b/internal/pubsub/topic.go index 8cd7a370910..3c8d5af319f 100644 --- a/internal/pubsub/topic.go +++ b/internal/pubsub/topic.go @@ -104,7 +104,7 @@ func (c *noopTopicClient) Stop() {} // Log entries are generated at debug level. func NewLoggingTopicClient(logger log.Logger) TopicClient { return &loggingTopicClient{ - logger: logger.Scoped("pubsub", "pubsub message printer for use in development"), + logger: logger.Scoped("pubsub"), } } diff --git a/internal/ratelimit/globallimiter.go b/internal/ratelimit/globallimiter.go index edce2b78d5a..818ebcf0776 100644 --- a/internal/ratelimit/globallimiter.go +++ b/internal/ratelimit/globallimiter.go @@ -66,7 +66,7 @@ type globalRateLimiter struct { } func NewGlobalRateLimiter(logger log.Logger, bucketName string) GlobalLimiter { - logger = logger.Scoped(fmt.Sprintf("GlobalRateLimiter.%s", bucketName), "") + logger = logger.Scoped(fmt.Sprintf("GlobalRateLimiter.%s", bucketName)) // Pool can return false for ok if the implementation of `KeyValue` is not // backed by a real redis server. For App, we implemented an in-memory version diff --git a/internal/repos/github.go b/internal/repos/github.go index a4a32a28ec9..13c98f3fd43 100644 --- a/internal/repos/github.go +++ b/internal/repos/github.go @@ -161,11 +161,11 @@ func newGitHubSource( urn := svc.URN() var ( - v3ClientLogger = log.Scoped("source", "github client for github source") + v3ClientLogger = log.Scoped("source") v3Client = github.NewV3Client(v3ClientLogger, urn, apiURL, auther, cli) v4Client = github.NewV4Client(urn, apiURL, auther, cli) - searchClientLogger = log.Scoped("search", "github client for search") + searchClientLogger = log.Scoped("search") searchClient = github.NewV3SearchClient(searchClientLogger, urn, apiURL, auther, cli) ) diff --git a/internal/repos/scheduler/scheduler.go b/internal/repos/scheduler/scheduler.go index febb377c7e3..a9a8038e427 100644 --- a/internal/repos/scheduler/scheduler.go +++ b/internal/repos/scheduler/scheduler.go @@ -72,7 +72,7 @@ const notifyChanBuffer = 1 // NewUpdateScheduler returns a new scheduler. 
func NewUpdateScheduler(logger log.Logger, db database.DB, gitserverClient gitserver.Client) *UpdateScheduler { - updateSchedLogger := logger.Scoped("UpdateScheduler", "repo update scheduler") + updateSchedLogger := logger.Scoped("UpdateScheduler") return &UpdateScheduler{ db: db, @@ -85,7 +85,7 @@ func NewUpdateScheduler(logger log.Logger, db database.DB, gitserverClient gitse index: make(map[api.RepoID]*scheduledRepoUpdate), wakeup: make(chan struct{}, notifyChanBuffer), randGenerator: rand.New(rand.NewSource(time.Now().UnixNano())), - logger: updateSchedLogger.Scoped("Schedule", ""), + logger: updateSchedLogger.Scoped("Schedule"), }, logger: updateSchedLogger, } @@ -169,7 +169,7 @@ func (s *UpdateScheduler) runUpdateLoop(ctx context.Context) { break } - subLogger := s.logger.Scoped("RunUpdateLoop", "") + subLogger := s.logger.Scoped("RunUpdateLoop") go func(ctx context.Context, repo configuredRepo, cancel context.CancelFunc) { defer cancel() diff --git a/internal/repos/sources.go b/internal/repos/sources.go index ea87710a3f0..1480c8bda23 100644 --- a/internal/repos/sources.go +++ b/internal/repos/sources.go @@ -27,7 +27,7 @@ type Sourcer func(context.Context, *types.ExternalService) (Source, error) // The provided decorator functions will be applied to the Source. func NewSourcer(logger log.Logger, db database.DB, cf *httpcli.Factory, decs ...func(Source) Source) Sourcer { return func(ctx context.Context, svc *types.ExternalService) (Source, error) { - src, err := NewSource(ctx, logger.Scoped("source", ""), db, svc, cf) + src, err := NewSource(ctx, logger.Scoped("source"), db, svc, cf) if err != nil { return nil, err } @@ -44,21 +44,21 @@ func NewSourcer(logger log.Logger, db database.DB, cf *httpcli.Factory, decs ... func NewSource(ctx context.Context, logger log.Logger, db database.DB, svc *types.ExternalService, cf *httpcli.Factory) (Source, error) { switch strings.ToUpper(svc.Kind) { case extsvc.KindGitHub: - return NewGitHubSource(ctx, logger.Scoped("GithubSource", "GitHub repo source"), db, svc, cf) + return NewGitHubSource(ctx, logger.Scoped("GithubSource"), db, svc, cf) case extsvc.KindGitLab: - return NewGitLabSource(ctx, logger.Scoped("GitLabSource", "GitLab repo source"), svc, cf) + return NewGitLabSource(ctx, logger.Scoped("GitLabSource"), svc, cf) case extsvc.KindAzureDevOps: - return NewAzureDevOpsSource(ctx, logger.Scoped("AzureDevOpsSource", "GitLab repo source"), svc, cf) + return NewAzureDevOpsSource(ctx, logger.Scoped("AzureDevOpsSource"), svc, cf) case extsvc.KindGerrit: return NewGerritSource(ctx, svc, cf) case extsvc.KindBitbucketServer: - return NewBitbucketServerSource(ctx, logger.Scoped("BitbucketServerSource", "bitbucket server repo source"), svc, cf) + return NewBitbucketServerSource(ctx, logger.Scoped("BitbucketServerSource"), svc, cf) case extsvc.KindBitbucketCloud: - return NewBitbucketCloudSource(ctx, logger.Scoped("BitbucketCloudSource", "bitbucket cloud repo source"), svc, cf) + return NewBitbucketCloudSource(ctx, logger.Scoped("BitbucketCloudSource"), svc, cf) case extsvc.KindGitolite: return NewGitoliteSource(ctx, svc, cf) case extsvc.KindPhabricator: - return NewPhabricatorSource(ctx, logger.Scoped("PhabricatorSource", "phabricator repo source"), svc, cf) + return NewPhabricatorSource(ctx, logger.Scoped("PhabricatorSource"), svc, cf) case extsvc.KindAWSCodeCommit: return NewAWSCodeCommitSource(ctx, svc, cf) case extsvc.KindPerforce: @@ -79,9 +79,9 @@ func NewSource(ctx context.Context, logger log.Logger, db database.DB, svc *type case 
extsvc.KindRubyPackages: return NewRubyPackagesSource(ctx, svc, cf) case extsvc.KindOther: - return NewOtherSource(ctx, svc, cf, logger.Scoped("OtherSource", "")) + return NewOtherSource(ctx, svc, cf, logger.Scoped("OtherSource")) case extsvc.VariantLocalGit.AsKind(): - return NewLocalGitSource(ctx, logger.Scoped("LocalSource", "local repo source"), svc) + return NewLocalGitSource(ctx, logger.Scoped("LocalSource"), svc) default: return nil, errors.Newf("cannot create source for kind %q", svc.Kind) } diff --git a/internal/repos/sync_worker.go b/internal/repos/sync_worker.go index d745b527ec0..b7fff029cd8 100644 --- a/internal/repos/sync_worker.go +++ b/internal/repos/sync_worker.go @@ -56,7 +56,7 @@ func NewSyncWorker(ctx context.Context, observationCtx *observation.Context, dbH sqlf.Sprintf("next_sync_at"), } - observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("repo.sync.workerstore.Store", ""), observationCtx) + observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("repo.sync.workerstore.Store"), observationCtx) store := dbworkerstore.New(observationCtx, dbHandle, dbworkerstore.Options[*SyncJob]{ Name: "repo_sync_worker_store", @@ -79,7 +79,7 @@ func NewSyncWorker(ctx context.Context, observationCtx *observation.Context, dbH Metrics: newWorkerMetrics(observationCtx), }) - resetter := dbworker.NewResetter(observationCtx.Logger.Scoped("repo.sync.worker.Resetter", ""), store, dbworker.ResetterOptions{ + resetter := dbworker.NewResetter(observationCtx.Logger.Scoped("repo.sync.worker.Resetter"), store, dbworker.ResetterOptions{ Name: "repo_sync_worker_resetter", Interval: 5 * time.Minute, Metrics: newResetterMetrics(observationCtx), @@ -96,7 +96,7 @@ func NewSyncWorker(ctx context.Context, observationCtx *observation.Context, dbH } func newWorkerMetrics(observationCtx *observation.Context) workerutil.WorkerObservability { - observationCtx = observation.ContextWithLogger(log.Scoped("sync_worker", ""), observationCtx) + observationCtx = observation.ContextWithLogger(log.Scoped("sync_worker"), observationCtx) return workerutil.NewMetrics(observationCtx, "repo_updater_external_service_syncer") } diff --git a/internal/repos/syncer.go b/internal/repos/syncer.go index 0c9d6758199..c7584bc42a6 100644 --- a/internal/repos/syncer.go +++ b/internal/repos/syncer.go @@ -52,7 +52,7 @@ func NewSyncer(observationCtx *observation.Context, store Store, sourcer Sourcer Store: store, Synced: make(chan types.RepoSyncDiff), Now: func() time.Time { return time.Now().UTC() }, - ObsvCtx: observation.ContextWithLogger(observationCtx.Logger.Scoped("syncer", "repo syncer"), observationCtx), + ObsvCtx: observation.ContextWithLogger(observationCtx.Logger.Scoped("syncer"), observationCtx), } } @@ -80,7 +80,7 @@ func (s *Syncer) Routines(ctx context.Context, store Store, opts RunOptions) []g s.initialUnmodifiedDiffFromStore(ctx, store) } - worker, resetter, syncerJanitor := NewSyncWorker(ctx, observation.ContextWithLogger(s.ObsvCtx.Logger.Scoped("syncWorker", ""), s.ObsvCtx), + worker, resetter, syncerJanitor := NewSyncWorker(ctx, observation.ContextWithLogger(s.ObsvCtx.Logger.Scoped("syncWorker"), s.ObsvCtx), store.Handle(), &syncHandler{ syncer: s, diff --git a/internal/repoupdater/client.go b/internal/repoupdater/client.go index 6973f92d62e..a0af422cc75 100644 --- a/internal/repoupdater/client.go +++ b/internal/repoupdater/client.go @@ -72,7 +72,7 @@ func NewClient(serverURL string) *Client { return nil, err } - l := log.Scoped("repoUpdateGRPCClient", "gRPC client for 
repo-updater") + l := log.Scoped("repoUpdateGRPCClient") conn, err := defaults.Dial(u.Host, l) if err != nil { return nil, err diff --git a/internal/rockskip/server.go b/internal/rockskip/server.go index d6115427363..756030181e2 100644 --- a/internal/rockskip/server.go +++ b/internal/rockskip/server.go @@ -60,7 +60,7 @@ func NewService( indexRequestQueues[i] = make(chan indexRequest, indexRequestsQueueSize) } - logger := log.Scoped("service", "") + logger := log.Scoped("service") service := &Service{ logger: logger, diff --git a/internal/scim/user_service.go b/internal/scim/user_service.go index d6cf7e8894e..e2f96b49598 100644 --- a/internal/scim/user_service.go +++ b/internal/scim/user_service.go @@ -59,9 +59,9 @@ type UserResourceHandler struct { func (h *UserResourceHandler) getLogger() log.Logger { if h.observationCtx != nil && h.observationCtx.Logger != nil { - return h.observationCtx.Logger.Scoped("scim.user", "resource handler for scim user") + return h.observationCtx.Logger.Scoped("scim.user") } - return log.Scoped("scim.user", "resource handler for scim user") + return log.Scoped("scim.user") } // NewUserResourceHandler returns a new UserResourceHandler. @@ -96,7 +96,7 @@ type UserSCIMService struct { } func (u *UserSCIMService) getLogger() log.Logger { - return log.Scoped("scim.user", "scim service for user") + return log.Scoped("scim.user") } func (u *UserSCIMService) Get(ctx context.Context, id string) (scim.Resource, error) { diff --git a/internal/search/backend/metered_searcher.go b/internal/search/backend/metered_searcher.go index 0ef013d3f5b..297df30a539 100644 --- a/internal/search/backend/metered_searcher.go +++ b/internal/search/backend/metered_searcher.go @@ -35,7 +35,7 @@ func NewMeteredSearcher(hostname string, z zoekt.Streamer) zoekt.Streamer { return &meteredSearcher{ Streamer: z, hostname: hostname, - log: sglog.Scoped("meteredSearcher", "wraps zoekt.Streamer with observability"), + log: sglog.Scoped("meteredSearcher"), } } diff --git a/internal/search/backend/zoekt.go b/internal/search/backend/zoekt.go index deb4124f30e..c6fc7b6e300 100644 --- a/internal/search/backend/zoekt.go +++ b/internal/search/backend/zoekt.go @@ -117,7 +117,7 @@ const maxRecvMsgSize = 128 * 1024 * 1024 // 128MiB func ZoektDialGRPC(endpoint string) zoekt.Streamer { conn, err := defaults.Dial( endpoint, - log.Scoped("zoekt", "Dial"), + log.Scoped("zoekt"), grpc.WithDefaultCallOptions(grpc.MaxCallRecvMsgSize(maxRecvMsgSize)), ) return NewMeteredSearcher(endpoint, &zoektGRPCClient{ diff --git a/internal/search/env.go b/internal/search/env.go index d7b062ce030..6f42c555233 100644 --- a/internal/search/env.go +++ b/internal/search/env.go @@ -43,7 +43,7 @@ func SearcherURLs() *endpoint.Map { func SearcherGRPCConnectionCache() *defaults.ConnectionCache { searcherGRPCConnectionCacheOnce.Do(func() { - logger := log.Scoped("searcherGRPCConnectionCache", "gRPC connection cache for searcher endpoints") + logger := log.Scoped("searcherGRPCConnectionCache") searcherGRPCConnectionCache = defaults.NewConnectionCache(logger) }) diff --git a/internal/search/exhaustive/service/service.go b/internal/search/exhaustive/service/service.go index 3e3e13f9d87..6c6c18724cc 100644 --- a/internal/search/exhaustive/service/service.go +++ b/internal/search/exhaustive/service/service.go @@ -30,7 +30,7 @@ func New( uploadStore uploadstore.Store, newSearcher NewSearcher, ) *Service { - logger := log.Scoped("searchjobs.Service", "search job service") + logger := log.Scoped("searchjobs.Service") svc := &Service{ logger: 
logger, diff --git a/internal/search/streaming/client/metadata.go b/internal/search/streaming/client/metadata.go index e5605df6df1..8ed4865c2dc 100644 --- a/internal/search/streaming/client/metadata.go +++ b/internal/search/streaming/client/metadata.go @@ -14,7 +14,7 @@ import ( // RepoNamer returns a best-effort function which translates repository IDs into names. func RepoNamer(ctx context.Context, db database.DB) streamapi.RepoNamer { - logger := log.Scoped("RepoNamer", "translate repository IDs into names") + logger := log.Scoped("RepoNamer") cache := map[api.RepoID]api.RepoName{} return func(ids []api.RepoID) []api.RepoName { diff --git a/internal/service/svcmain/svcmain.go b/internal/service/svcmain/svcmain.go index 3adcbfafc9e..f37045fc812 100644 --- a/internal/service/svcmain/svcmain.go +++ b/internal/service/svcmain/svcmain.go @@ -86,7 +86,7 @@ func Main(services []sgservice.Service, config Config, args []string) { }, } app.Action = func(_ *cli.Context) error { - logger := log.Scoped("sourcegraph", "Sourcegraph") + logger := log.Scoped("sourcegraph") cleanup := singleprogram.Init(logger) defer func() { err := cleanup() @@ -124,7 +124,7 @@ func SingleServiceMain(svc sgservice.Service, config Config) { }, ), ) - logger := log.Scoped("sourcegraph", "Sourcegraph") + logger := log.Scoped("sourcegraph") run(liblog, logger, []sgservice.Service{svc}, config, nil) } @@ -156,7 +156,7 @@ func SingleServiceMainWithoutConf(svc sgservice.Service, config Config, oobConfi }, ), ) - logger := log.Scoped("sourcegraph", "Sourcegraph") + logger := log.Scoped("sourcegraph") run(liblog, logger, []sgservice.Service{svc}, config, &oobConfig) } @@ -186,7 +186,7 @@ func run( go oobConfig.Logging.Watch(liblog.Update(oobConfig.Logging.SinksConfig)) } if oobConfig.Tracing != nil { - tracer.Init(log.Scoped("tracer", "internal tracer package"), oobConfig.Tracing) + tracer.Init(log.Scoped("tracer"), oobConfig.Tracing) } profiler.Init() @@ -244,7 +244,7 @@ func run( // TODO(sqs): TODO(single-binary): Consider using the goroutine package and/or the errgroup package to report // errors and listen to signals to initiate cleanup in a consistent way across all // services. - obctx := observation.ContextWithLogger(log.Scoped(service.Name(), service.Name()), obctx) + obctx := observation.ContextWithLogger(log.Scoped(service.Name()), obctx) // ensure ready is only called once and always call it. 
ready := syncx.OnceFunc(allReadyWG.Done) diff --git a/internal/symbols/client.go b/internal/symbols/client.go index e9dceb1716d..3af0f65d082 100644 --- a/internal/symbols/client.go +++ b/internal/symbols/client.go @@ -44,7 +44,7 @@ func defaultEndpoints() *endpoint.Map { func LoadConfig() { DefaultClient = &Client{ Endpoints: defaultEndpoints(), - GRPCConnectionCache: defaults.NewConnectionCache(log.Scoped("symbolsConnectionCache", "grpc connection cache for clients of the symbols service")), + GRPCConnectionCache: defaults.NewConnectionCache(log.Scoped("symbolsConnectionCache")), HTTPClient: defaultDoer, HTTPLimiter: limiter.New(500), SubRepoPermsChecker: func() authz.SubRepoPermissionChecker { return authz.DefaultSubRepoPermsChecker }, diff --git a/internal/telemetry/telemetryrecorder/telemetryrecorder.go b/internal/telemetry/telemetryrecorder/telemetryrecorder.go index 4453a8c36bd..77c0092bc20 100644 --- a/internal/telemetry/telemetryrecorder/telemetryrecorder.go +++ b/internal/telemetry/telemetryrecorder/telemetryrecorder.go @@ -30,6 +30,6 @@ func New(db database.DB) *telemetry.EventRecorder { // as the new Telemetry Gateway export queue. func NewBestEffort(logger log.Logger, db database.DB) *telemetry.BestEffortEventRecorder { return telemetry.NewBestEffortEventRecorder( - logger.Scoped("telemetry", "telemetry event recorder"), + logger.Scoped("telemetry"), New(db)) } diff --git a/internal/trace/httptrace.go b/internal/trace/httptrace.go index 8eb7a3b84ba..a8fc56ae423 100644 --- a/internal/trace/httptrace.go +++ b/internal/trace/httptrace.go @@ -109,7 +109,7 @@ var ( // 🚨 SECURITY: This handler is served to all clients, even on private servers to clients who have // not authenticated. It must not reveal any sensitive information. func HTTPMiddleware(l log.Logger, next http.Handler, siteConfig conftypes.SiteConfigQuerier) http.Handler { - l = l.Scoped("http", "http tracing middleware") + l = l.Scoped("http") return loggingRecoverer(l, http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { ctx := r.Context() diff --git a/internal/tracer/tracer.go b/internal/tracer/tracer.go index b7526a236cb..f2007ff164f 100644 --- a/internal/tracer/tracer.go +++ b/internal/tracer/tracer.go @@ -123,7 +123,7 @@ func Init(logger log.Logger, c WatchableConfigurationSource) { func newTracer(logger log.Logger, provider *oteltracesdk.TracerProvider, debugMode *atomic.Bool) oteltrace.TracerProvider { // Set up logging - otelLogger := logger.AddCallerSkip(2).Scoped("otel", "OpenTelemetry library") + otelLogger := logger.AddCallerSkip(2).Scoped("otel") otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) { if debugMode.Load() { otelLogger.Warn("error encountered", log.Error(err)) diff --git a/internal/ttlcache/cache.go b/internal/ttlcache/cache.go index 8718696a5a6..15b751779cd 100644 --- a/internal/ttlcache/cache.go +++ b/internal/ttlcache/cache.go @@ -57,7 +57,7 @@ func New[K comparable, V any](newEntryFunc func(K) V, options ...Option[K, V]) * newEntryFunc: newEntryFunc, expirationFunc: func(k K, v V) {}, - logger: log.Scoped("ttlcache", "cache"), + logger: log.Scoped("ttlcache"), sizeWarningThreshold: 0, diff --git a/internal/updatecheck/client.go b/internal/updatecheck/client.go index 06f0be07ceb..1aafafecd75 100644 --- a/internal/updatecheck/client.go +++ b/internal/updatecheck/client.go @@ -385,7 +385,7 @@ func getAndMarshalRepoMetadataUsageJSON(ctx context.Context, db database.DB) (_ } func getDependencyVersions(ctx context.Context, db database.DB, logger log.Logger) (json.RawMessage, 
error) { - logFunc := logFuncFrom(logger.Scoped("getDependencyVersions", "gets the version of various dependency services")) + logFunc := logFuncFrom(logger.Scoped("getDependencyVersions")) var ( err error dv dependencyVersions @@ -508,7 +508,7 @@ func getAndMarshalOwnUsageJSON(ctx context.Context, db database.DB) (json.RawMes } func updateBody(ctx context.Context, logger log.Logger, db database.DB) (io.Reader, error) { - scopedLog := logger.Scoped("telemetry", "track and update various usages stats") + scopedLog := logger.Scoped("telemetry") logFunc := scopedLog.Debug if envvar.SourcegraphDotComMode() { logFunc = scopedLog.Warn @@ -915,7 +915,7 @@ func Start(logger log.Logger, db database.DB) { started = true const delay = 30 * time.Minute - scopedLog := logger.Scoped("updatecheck", "checks for updates of services and updates usage telemetry") + scopedLog := logger.Scoped("updatecheck") for { check(scopedLog, db) diff --git a/internal/updatecheck/handler.go b/internal/updatecheck/handler.go index bf0fb3d6654..4ec563e4d38 100644 --- a/internal/updatecheck/handler.go +++ b/internal/updatecheck/handler.go @@ -395,7 +395,7 @@ type pingPayload struct { } func logPing(logger log.Logger, pubsubClient pubsub.TopicClient, meter *Meter, r *http.Request, pr *pingRequest, hasUpdate bool) { - logger = logger.Scoped("logPing", "logs ping requests") + logger = logger.Scoped("logPing") defer func() { if err := recover(); err != nil { logger.Warn("panic", log.String("recover", fmt.Sprintf("%+v", err))) diff --git a/internal/usagestats/code_insights.go b/internal/usagestats/code_insights.go index a1d393d41d5..fee1a7161c4 100644 --- a/internal/usagestats/code_insights.go +++ b/internal/usagestats/code_insights.go @@ -32,7 +32,7 @@ func (p *pingLoader) withOperation(name string, loadFunc pingLoadFunc) { func (p *pingLoader) generate(ctx context.Context, db database.DB) *types.CodeInsightsUsageStatistics { stats := &types.CodeInsightsUsageStatistics{} - logger := log.Scoped("code insights ping loader", "pings for code insights") + logger := log.Scoped("code insights ping loader") for name, loadFunc := range p.operations { err := loadFunc(ctx, db, stats, p.now) diff --git a/internal/usagestats/search_jobs.go b/internal/usagestats/search_jobs.go index 1054910e8c5..9b217339053 100644 --- a/internal/usagestats/search_jobs.go +++ b/internal/usagestats/search_jobs.go @@ -29,7 +29,7 @@ func (p *eventLoader) withOperation(name string, loadFunc evenLoadFunc) { func (p *eventLoader) generate(ctx context.Context, db database.DB) *types.SearchJobsUsageStatistics { stats := &types.SearchJobsUsageStatistics{} - logger := log.Scoped("search jobs ping loader", "pings for search jobs") + logger := log.Scoped("search jobs ping loader") for name, loadFunc := range p.operations { err := loadFunc(ctx, db, stats, p.now) diff --git a/internal/users/update_aggregated_stats_job.go b/internal/users/update_aggregated_stats_job.go index 36946491b06..af15fa4fa3d 100644 --- a/internal/users/update_aggregated_stats_job.go +++ b/internal/users/update_aggregated_stats_job.go @@ -51,7 +51,7 @@ func updateAggregatedUsersStatisticsTable(ctx context.Context, db database.DB) e var started bool func StartUpdateAggregatedUsersStatisticsTable(ctx context.Context, db database.DB) { - logger := log.Scoped("aggregated_user_statistics:cache-refresh", "aggregated_user_statistics cache refresh") + logger := log.Scoped("aggregated_user_statistics:cache-refresh") if started { panic("already started") diff --git a/internal/workerutil/dbworker/metrics.go 
b/internal/workerutil/dbworker/metrics.go index 84609926855..45ffe1722a7 100644 --- a/internal/workerutil/dbworker/metrics.go +++ b/internal/workerutil/dbworker/metrics.go @@ -19,7 +19,7 @@ func InitPrometheusMetric[T workerutil.Record](observationCtx *observation.Conte teamAndResource = team + "_" + teamAndResource } - logger := observationCtx.Logger.Scoped("InitPrometheusMetric", "") + logger := observationCtx.Logger.Scoped("InitPrometheusMetric") observationCtx.Registerer.MustRegister(prometheus.NewGaugeFunc(prometheus.GaugeOpts{ Name: fmt.Sprintf("src_%s_total", teamAndResource), Help: fmt.Sprintf("Total number of %s records in the queued state.", resource), diff --git a/internal/workerutil/worker.go b/internal/workerutil/worker.go index 216eac5184a..c600d784372 100644 --- a/internal/workerutil/worker.go +++ b/internal/workerutil/worker.go @@ -115,7 +115,7 @@ func newWorker[T Record](ctx context.Context, store Store[T], handler Handler[T] // Initialize the logger if options.Metrics.logger == nil { - options.Metrics.logger = log.Scoped("worker."+options.Name, "a worker process for "+options.WorkerHostname) + options.Metrics.logger = log.Scoped("worker." + options.Name) } options.Metrics.logger = options.Metrics.logger.With(log.String("name", options.Name)) diff --git a/lib/codeintel/tools/lsif-index-tester/main.go b/lib/codeintel/tools/lsif-index-tester/main.go index 5c650e54c67..53b3d81beda 100644 --- a/lib/codeintel/tools/lsif-index-tester/main.go +++ b/lib/codeintel/tools/lsif-index-tester/main.go @@ -85,7 +85,7 @@ func main() { liblog := log.Init(log.Resource{Name: "lsif-index-tester"}) defer liblog.Sync() - logger := log.Scoped(raw_indexer, "indexer testing").With(log.String("directory", directory)) + logger := log.Scoped(raw_indexer).With(log.String("directory", directory)) if raw_indexer == "" { logger.Fatal("Indexer is required. Pass with --indexer") @@ -177,7 +177,7 @@ func testProject(ctx context.Context, logger log.Logger, indexer []string, proje } logger.Debug("... Completed setup project") - result, err := runIndexer(ctx, logger.Scoped("run", "run indexer"), indexer, project, name) + result, err := runIndexer(ctx, logger.Scoped("run"), indexer, project, name) if err != nil { return projectResult{ name: name, @@ -200,7 +200,7 @@ func testProject(ctx context.Context, logger log.Logger, indexer []string, proje } logger.Debug("... 
Read bundle") - testResult, err := validateTestCases(logger.Scoped("validate", "validate test cases"), project, bundle) + testResult, err := validateTestCases(logger.Scoped("validate"), project, bundle) if err != nil { return projectResult{name: name}, err } diff --git a/monitoring/command/generate.go b/monitoring/command/generate.go index c77fe5bc17f..c5703abe94a 100644 --- a/monitoring/command/generate.go +++ b/monitoring/command/generate.go @@ -111,7 +111,7 @@ func Generate(cmdRoot string, sgRoot string) *cli.Command { return definitions.Default().Names() }), Action: func(c *cli.Context) error { - logger := log.Scoped(c.Command.Name, c.Command.Description) + logger := log.Scoped(c.Command.Name) // expandErr is set from within expandWithSgRoot var expandErr error diff --git a/monitoring/go.mod b/monitoring/go.mod index 25224967521..ed231095638 100644 --- a/monitoring/go.mod +++ b/monitoring/go.mod @@ -9,7 +9,7 @@ require ( github.com/iancoleman/strcase v0.3.0 github.com/prometheus/common v0.37.0 github.com/prometheus/prometheus v0.40.5 - github.com/sourcegraph/log v0.0.0-20221206163500-7d93c6ad7037 + github.com/sourcegraph/log v0.0.0-20231018134238-fbadff7458bb github.com/sourcegraph/sourcegraph/lib v0.0.0-20230613175844-f031949c72f5 github.com/stretchr/testify v1.8.2 github.com/urfave/cli/v2 v2.23.7 @@ -34,7 +34,7 @@ require ( github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dennwc/varint v1.0.0 // indirect - github.com/fatih/color v1.13.0 // indirect + github.com/fatih/color v1.15.0 // indirect github.com/getsentry/sentry-go v0.21.0 // indirect github.com/go-kit/log v0.2.1 // indirect github.com/go-logfmt/logfmt v0.5.1 // indirect @@ -54,7 +54,7 @@ require ( github.com/kr/pretty v0.3.1 // indirect github.com/kr/text v0.2.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.17 // indirect + github.com/mattn/go-isatty v0.0.18 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect @@ -64,9 +64,9 @@ require ( github.com/rogpeppe/go-internal v1.10.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect - go.uber.org/atomic v1.10.0 // indirect + go.uber.org/atomic v1.11.0 // indirect go.uber.org/goleak v1.2.0 // indirect - go.uber.org/multierr v1.8.0 // indirect + go.uber.org/multierr v1.11.0 // indirect go.uber.org/zap v1.24.0 // indirect golang.org/x/crypto v0.7.0 // indirect golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 // indirect diff --git a/monitoring/go.sum b/monitoring/go.sum index ac4694b808c..7238e8e9cb9 100644 --- a/monitoring/go.sum +++ b/monitoring/go.sum @@ -33,6 +33,7 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9 dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/CloudyKit/fastprinter v0.0.0-20200109182630-33d98a066a53/go.mod h1:+3IMCy2vIlbG1XG/0ggNQv0SvxCAIpPM5b1nCz56Xno= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/aws/aws-sdk-go v1.44.128 
h1:X34pX5t0LIZXjBY11yf9JKMP3c1aZgirh+5PjtaZyJ4= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= @@ -56,12 +57,14 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/cockroachdb/datadriven v1.0.2/go.mod h1:a9RdTaap04u637JoCzcUoIcDmvwSUtcUFtT/C3kJlTU= github.com/cockroachdb/errors v1.11.1 h1:xSEW75zKaKCWzR3OfxXUxgrk/NtT4G1MiOv5lWZazG8= github.com/cockroachdb/errors v1.11.1/go.mod h1:8MUxA3Gi6b25tYlFEBGLf+D8aISL+M4MIpiWMSNRfxw= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b h1:r6VH0faHjZeQy818SGhaone5OnYfxFR/+AzdY3sf5aE= github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b/go.mod h1:Vz9DsVWQQhf3vs21MhPMZpMGSht7O/2vFW2xusFUVOs= github.com/cockroachdb/redact v1.1.5 h1:u1PMllDkdFfPWaNGMyLD1+so+aq3uUItthCFqzwPJ30= github.com/cockroachdb/redact v1.1.5/go.mod h1:BVNblN9mBWFyMyqK1k3AAiSxhvhfK2oOZZ2lK+dpvRg= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= @@ -73,12 +76,15 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dennwc/varint v1.0.0 h1:kGNFFSSw8ToIy3obO/kKr8U9GZYUAxQEVuix4zfDWzE= github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgziApxA= +github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eknkc/amber v0.0.0-20171010120322-cdade1c07385/go.mod h1:0vRUJqYpeSZifjYj7uP3BG/gKcuzL9xWVV/Y+cK33KM= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= -github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.15.0 h1:kOqh6YHBtK8aywxGerMG2Eq3H6Qgoqeo13Bk2Mv/nBs= +github.com/fatih/color v1.15.0/go.mod h1:0h5ZqXfHYED7Bhv2ZJamyIOUej9KtShiJESRwBDUSsw= +github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/getsentry/sentry-go v0.21.0 h1:c9l5F1nPF30JIppulk4veau90PK6Smu3abgVtVQWon4= github.com/getsentry/sentry-go v0.21.0/go.mod h1:lc76E2QywIyW8WuBnwl8Lc4bkmQH4+w1gwTf25trprY= github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= @@ -91,6 +97,7 @@ github.com/go-kit/log v0.2.1/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBj github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= 
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= @@ -99,8 +106,10 @@ github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6Wezm github.com/gobwas/ws v1.1.0-rc.5/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0= github.com/gobwas/ws v1.1.0 h1:7RFti/xnNkMJnrK7D1yQ/iCIB5OrrY/54/H930kIbHA= github.com/gobwas/ws v1.1.0/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0= +github.com/gogo/googleapis v1.4.1/go.mod h1:2lpHqI5OcWCtVElxXnPt+s8oJvMpySlOyM6xDCrzib4= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= +github.com/gogo/status v1.1.0/go.mod h1:BFv9nrluPLmrS0EmGVvLaPNmRosr9KapBYd5/hpY1WM= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -174,6 +183,7 @@ github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= @@ -237,25 +247,25 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= -github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPng= 
-github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= +github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f h1:KUppIJq7/+SVif2QVs3tOP0zanoHgBEVAwHxUSIzRqU= github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -288,11 +298,14 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/schollz/closestmatch v2.1.0+incompatible/go.mod h1:RtP1ddjLong6gTkbtmuhtR2uUrrJOpYzYRvbcPAid+g= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sourcegraph/log v0.0.0-20221206163500-7d93c6ad7037 h1:hgZHfGYG3KMlDDfACuPgzhMIaEjblCqZ+YltcPgd0tw= -github.com/sourcegraph/log v0.0.0-20221206163500-7d93c6ad7037/go.mod h1:y+sVdVKxHhp489iiiXeZgXIhSpFYijtp/pLD2coj96g= +github.com/sourcegraph/log v0.0.0-20231018072732-c9c6af5e5ade h1:EpakpjinOPDoLLUXfAzdrKc21teqkWJ3xLLTOINb3m8= +github.com/sourcegraph/log v0.0.0-20231018072732-c9c6af5e5ade/go.mod h1:IDp09QkoqS8Z3CyN2RW6vXjgABkNpDbyjLIHNQwQ8P8= +github.com/sourcegraph/log v0.0.0-20231018134238-fbadff7458bb h1:tHKdC+bXxxGJ0cy/R06kg6Z0zqwVGOWMx8uWsIwsaoY= +github.com/sourcegraph/log v0.0.0-20231018134238-fbadff7458bb/go.mod h1:IDp09QkoqS8Z3CyN2RW6vXjgABkNpDbyjLIHNQwQ8P8= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= @@ -309,6 
+322,10 @@ github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/urfave/cli/v2 v2.23.7 h1:YHDQ46s3VghFHFf1DdF+Sh7H4RqhcM+t0TmZRJx4oJY= github.com/urfave/cli/v2 v2.23.7/go.mod h1:GHupkWPMM0M/sj1a2b4wUrWBPzazNrIjouW6fmdJLxc= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU= github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -323,14 +340,13 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/atomic v1.10.0 h1:9qC72Qh0+3MqyJbAn8YU5xVq1frD8bn3JtD2oXtafVQ= -go.uber.org/atomic v1.10.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= +go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= +go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= go.uber.org/goleak v1.2.0/go.mod h1:XJYK+MuIchqpmGmUSAzotztawfKvYLUIgg7guXrwVUo= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= -go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= +go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.24.0 h1:FiJd5l1UOLj0wCgbSE0rwwXHzEdAZS6hiiSnxJN/D60= @@ -449,7 +465,6 @@ golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -472,6 +487,7 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod 
h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -627,7 +643,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= diff --git a/monitoring/main.go b/monitoring/main.go index 657eca1a80a..619b3554a27 100644 --- a/monitoring/main.go +++ b/monitoring/main.go @@ -26,7 +26,7 @@ func main() { liblog := log.Init(log.Resource{Name: "monitoring-generator"}) defer liblog.Sync() - logger := log.Scoped("monitoring", "main Sourcegraph monitoring entrypoint") + logger := log.Scoped("monitoring") // Create an app that only runs the generate command app := &cli.App{ diff --git a/monitoring/monitoring/generator.go b/monitoring/monitoring/generator.go index 2beb41bf328..12ff5998897 100644 --- a/monitoring/monitoring/generator.go +++ b/monitoring/monitoring/generator.go @@ -86,7 +86,7 @@ func Generate(logger log.Logger, opts GenerateOptions, dashboards ...*Dashboard) var grafanaClient *grafanasdk.Client var grafanaFolderID int if opts.GrafanaURL != "" && opts.Reload { - gclog := logger.Scoped("grafana.client", "grafana client setup") + gclog := logger.Scoped("grafana.client") var err error grafanaClient, err = grafanaclient.New(opts.GrafanaURL, opts.GrafanaCredentials, opts.GrafanaHeaders) @@ -146,7 +146,7 @@ func Generate(logger log.Logger, opts GenerateOptions, dashboards ...*Dashboard) var generatedAssets []string var err error if len(opts.MultiInstanceDashboardGroupings) > 0 { - l := logger.Scoped("multi-instance", "multi-instance dashboards") + l := logger.Scoped("multi-instance") l.Info("generating multi-instance") generatedAssets, err = generateMultiInstance(ctx, l, grafanaClient, grafanaFolderID, dashboards, opts) } else { @@ -212,7 +212,7 @@ func generateAll( // Logger for dashboard dlog := logger.With(log.String("dashboard", dashboard.Name)) - glog := dlog.Scoped("grafana", "grafana dashboard generation"). + glog := dlog.Scoped("grafana"). 
With(log.String("instance", opts.GrafanaURL)) glog.Debug("Rendering Grafana assets") @@ -251,7 +251,7 @@ func generateAll( // Prepare Prometheus assets if opts.PrometheusDir != "" { - plog := dlog.Scoped("prometheus", "prometheus rules generation") + plog := dlog.Scoped("prometheus") plog.Debug("Rendering Prometheus assets") promAlertsFile, err := dashboard.RenderPrometheusRules(opts.InjectLabelMatchers) @@ -291,7 +291,7 @@ func generateAll( // Reload all Prometheus rules if opts.PrometheusDir != "" && opts.PrometheusURL != "" && opts.Reload { - rlog := logger.Scoped("prometheus", "prometheus alerts generation"). + rlog := logger.Scoped("prometheus"). With(log.String("instance", opts.PrometheusURL)) // Reload all Prometheus rules rlog.Debug("Reloading Prometheus instance")